Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_apd.c1
-rw-r--r--drivers/acpi/arm64/iort.c11
-rw-r--r--drivers/acpi/device_sysfs.c6
-rw-r--r--drivers/acpi/pci_mcfg.c12
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/sleep.c4
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/ata/sata_rcar.c1
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/memory.c24
-rw-r--r--drivers/base/power/domain.c8
-rw-r--r--drivers/block/brd.c6
-rw-r--r--drivers/block/rbd.c24
-rw-r--r--drivers/bus/tegra-aconnect.c66
-rw-r--r--drivers/bus/ti-sysc.c661
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c1
-rw-r--r--drivers/clk/axs10x/pll_clock.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1
-rw-r--r--drivers/clk/bcm/clk-kona.c3
-rw-r--r--drivers/clk/berlin/berlin2-div.c1
-rw-r--r--drivers/clk/berlin/bg2.c1
-rw-r--r--drivers/clk/berlin/bg2q.c1
-rw-r--r--drivers/clk/clk-fixed-mmio.c3
-rw-r--r--drivers/clk/clk-fractional-divider.c1
-rw-r--r--drivers/clk/clk-hsdk-pll.c1
-rw-r--r--drivers/clk/clk-multiplier.c1
-rw-r--r--drivers/clk/davinci/pll-da850.c1
-rw-r--r--drivers/clk/h8300/clk-div.c1
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c3
-rw-r--r--drivers/clk/hisilicon/clk-hi3660-stub.c1
-rw-r--r--drivers/clk/imx/clk-composite-8m.c3
-rw-r--r--drivers/clk/imx/clk-frac-pll.c1
-rw-r--r--drivers/clk/imx/clk-imx21.c1
-rw-r--r--drivers/clk/imx/clk-imx27.c1
-rw-r--r--drivers/clk/imx/clk-pfdv2.c1
-rw-r--r--drivers/clk/imx/clk-pllv4.c1
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c1
-rw-r--r--drivers/clk/ingenic/cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c1
-rw-r--r--drivers/clk/loongson1/clk-loongson1c.c1
-rw-r--r--drivers/clk/microchip/clk-core.c1
-rw-r--r--drivers/clk/microchip/clk-pic32mzda.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c1
-rw-r--r--drivers/clk/mvebu/clk-corediv.c1
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c1
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c1
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c1
-rw-r--r--drivers/clk/pxa/clk-pxa.c1
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c1
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c1
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c1
-rw-r--r--drivers/clk/renesas/clk-rz.c1
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c1
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c1
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c1
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c1
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c3
-rw-r--r--drivers/clk/rockchip/clk-px30.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c1
-rw-r--r--drivers/clk/rockchip/clk-rv1108.c1
-rw-r--r--drivers/clk/rockchip/clk.c1
-rw-r--r--drivers/clk/samsung/clk-cpu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c1
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c1
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c1
-rw-r--r--drivers/clk/samsung/clk-pll.c3
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c1
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c1
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c1
-rw-r--r--drivers/clk/samsung/clk.c1
-rw-r--r--drivers/clk/sifive/fu540-prci.c1
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c1
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c1
-rw-r--r--drivers/clk/st/clkgen-mux.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mmc_timing.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_phase.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-mod1.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-pll2.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c1
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c1
-rw-r--r--drivers/clk/sunxi/clk-mod0.c1
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-display.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-pll3.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-tcon-ch1.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-cpus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c1
-rw-r--r--drivers/clk/sunxi/clk-usb.c1
-rw-r--r--drivers/clk/tegra/clk-emc.c1
-rw-r--r--drivers/clk/tegra/clk-periph-fixed.c1
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c1
-rw-r--r--drivers/clk/tegra/clk.c1
-rw-r--r--drivers/clk/ti/adpll.c1
-rw-r--r--drivers/clk/ti/clk.c1
-rw-r--r--drivers/clk/ti/fapll.c1
-rw-r--r--drivers/clk/versatile/clk-sp810.c1
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c1
-rw-r--r--drivers/clk/zynqmp/clkc.c4
-rw-r--r--drivers/clocksource/Kconfig7
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/timer-ixp4xx.c282
-rw-r--r--drivers/cpufreq/cpufreq.c140
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c1
-rw-r--r--drivers/crypto/caam/caamalg.c9
-rw-r--r--drivers/crypto/caam/caamalg_qi.c7
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c8
-rw-r--r--drivers/crypto/caam/error.c2
-rw-r--r--drivers/crypto/caam/jr.c2
-rw-r--r--drivers/crypto/caam/regs.h8
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c17
-rw-r--r--drivers/crypto/chelsio/chcr_core.c4
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c3
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/dax/Kconfig3
-rw-r--r--drivers/dax/device.c6
-rw-r--r--drivers/dax/pmem/core.c6
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--drivers/edac/Kconfig4
-rw-r--r--drivers/edac/edac_mc.c12
-rw-r--r--drivers/firewire/core-iso.c15
-rw-r--r--drivers/firmware/Kconfig16
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_scmi/driver.c8
-rw-r--r--drivers/firmware/imx/Makefile2
-rw-r--r--drivers/firmware/imx/imx-scu-irq.c168
-rw-r--r--drivers/firmware/imx/imx-scu.c6
-rw-r--r--drivers/firmware/imx/scu-pd.c121
-rw-r--r--drivers/firmware/trusted_foundations.c176
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c18
-rw-r--r--drivers/firmware/xilinx/zynqmp.c56
-rw-r--r--drivers/fpga/Kconfig9
-rw-r--r--drivers/fpga/Makefile1
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c2
-rw-r--r--drivers/fpga/zynqmp-fpga.c159
-rw-r--r--drivers/gpio/Kconfig103
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-74x164.c22
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c4
-rw-r--r--drivers/gpio/gpio-amdpt.c8
-rw-r--r--drivers/gpio/gpio-aspeed.c4
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-cadence.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c7
-rw-r--r--drivers/gpio/gpio-dwapb.c4
-rw-r--r--drivers/gpio/gpio-ftgpio010.c4
-rw-r--r--drivers/gpio/gpio-hlwd.c4
-rw-r--r--drivers/gpio/gpio-iop.c4
-rw-r--r--drivers/gpio/gpio-ixp4xx.c474
-rw-r--r--drivers/gpio/gpio-janz-ttl.c4
-rw-r--r--drivers/gpio/gpio-loongson1.c4
-rw-r--r--drivers/gpio/gpio-lpc18xx.c5
-rw-r--r--drivers/gpio/gpio-max77650.c190
-rw-r--r--drivers/gpio/gpio-mb86s7x.c4
-rw-r--r--drivers/gpio/gpio-mlxbf.c152
-rw-r--r--drivers/gpio/gpio-mmio.c99
-rw-r--r--drivers/gpio/gpio-mt7621.c3
-rw-r--r--drivers/gpio/gpio-mvebu.c7
-rw-r--r--drivers/gpio/gpio-mxc.c4
-rw-r--r--drivers/gpio/gpio-octeon.c4
-rw-r--r--drivers/gpio/gpio-omap.c644
-rw-r--r--drivers/gpio/gpio-pca953x.c25
-rw-r--r--drivers/gpio/gpio-pxa.c12
-rw-r--r--drivers/gpio/gpio-rcar.c5
-rw-r--r--drivers/gpio/gpio-sch.c5
-rw-r--r--drivers/gpio/gpio-spear-spics.c4
-rw-r--r--drivers/gpio/gpio-sprd.c4
-rw-r--r--drivers/gpio/gpio-sta2x11.c5
-rw-r--r--drivers/gpio/gpio-stp-xway.c4
-rw-r--r--drivers/gpio/gpio-tb10x.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-timberdale.c4
-rw-r--r--drivers/gpio/gpio-ts4800.c4
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpio-vf610.c92
-rw-r--r--drivers/gpio/gpio-xgene-sb.c4
-rw-r--r--drivers/gpio/gpio-xlp.c7
-rw-r--r--drivers/gpio/gpio-zx.c4
-rw-r--r--drivers/gpio/gpio-zynq.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c115
-rw-r--r--drivers/gpio/gpiolib-of.c24
-rw-r--r--drivers/gpio/gpiolib.c43
-rw-r--r--drivers/gpio/gpiolib.h19
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c3
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c19
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h16
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/i915_request.c60
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c78
-rw-r--r--drivers/gpu/drm/i915/intel_context.c1
-rw-r--r--drivers/gpu/drm/i915/intel_context_types.h3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c1
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c4
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h4
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c11
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c3
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c17
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c1
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c1
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c3
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c18
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c6
-rw-r--r--drivers/hwmon/gpio-fan.c25
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/mlxreg-fan.c31
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c6
-rw-r--r--drivers/hwmon/pwm-fan.c97
-rw-r--r--drivers/iio/inkern.c22
-rw-r--r--drivers/infiniband/core/addr.c16
-rw-r--r--drivers/infiniband/core/nldev.c27
-rw-r--r--drivers/infiniband/core/umem.c5
-rw-r--r--drivers/infiniband/core/umem_odp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c3
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c9
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/input/keyboard/Kconfig13
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c8
-rw-r--r--drivers/input/keyboard/qt1050.c598
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c30
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c38
-rw-r--r--drivers/input/misc/Kconfig21
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/gpio-vibra.c207
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c20
-rw-r--r--drivers/input/misc/max77650-onkey.c121
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/rmi4/rmi_f54.c21
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/serio/hyperv-keyboard.c2
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/touchscreen/Kconfig10
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c23
-rw-r--r--drivers/input/touchscreen/goodix.c54
-rw-r--r--drivers/input/touchscreen/iqs5xx.c1133
-rw-r--r--drivers/iommu/Kconfig25
-rw-r--r--drivers/iommu/amd_iommu.c54
-rw-r--r--drivers/iommu/amd_iommu_init.c8
-rw-r--r--drivers/iommu/amd_iommu_types.h6
-rw-r--r--drivers/iommu/arm-smmu-regs.h2
-rw-r--r--drivers/iommu/arm-smmu-v3.c355
-rw-r--r--drivers/iommu/arm-smmu.c11
-rw-r--r--drivers/iommu/dma-iommu.c47
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-iommu.c586
-rw-r--r--drivers/iommu/intel-pasid.c4
-rw-r--r--drivers/iommu/intel-svm.c19
-rw-r--r--drivers/iommu/intel_irq_remapping.c9
-rw-r--r--drivers/iommu/iommu.c211
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c8
-rw-r--r--drivers/iommu/tegra-smmu.c41
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-ixp4xx.c403
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-max77650.c147
-rw-r--r--drivers/lightnvm/core.c82
-rw-r--r--drivers/lightnvm/pblk-cache.c8
-rw-r--r--drivers/lightnvm/pblk-core.c65
-rw-r--r--drivers/lightnvm/pblk-gc.c52
-rw-r--r--drivers/lightnvm/pblk-init.c65
-rw-r--r--drivers/lightnvm/pblk-map.c1
-rw-r--r--drivers/lightnvm/pblk-rb.c13
-rw-r--r--drivers/lightnvm/pblk-read.c394
-rw-r--r--drivers/lightnvm/pblk-recovery.c74
-rw-r--r--drivers/lightnvm/pblk-write.c1
-rw-r--r--drivers/lightnvm/pblk.h28
-rw-r--r--drivers/mailbox/Kconfig10
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/armada-37xx-rwtm-mailbox.c225
-rw-r--r--drivers/mailbox/imx-mailbox.c4
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c1
-rw-r--r--drivers/mailbox/stm32-ipcc.c13
-rw-r--r--drivers/md/Kconfig9
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-crypt.c26
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-dust.c515
-rw-r--r--drivers/md/dm-exception-store.h3
-rw-r--r--drivers/md/dm-init.c8
-rw-r--r--drivers/md/dm-integrity.c717
-rw-r--r--drivers/md/dm-ioctl.c6
-rw-r--r--drivers/md/dm-mpath.c19
-rw-r--r--drivers/md/dm-rq.c8
-rw-r--r--drivers/md/dm-snap.c359
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin-metadata.c139
-rw-r--r--drivers/md/dm-writecache.c29
-rw-r--r--drivers/md/dm-zoned-metadata.c5
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c12
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c22
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h19
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c46
-rw-r--r--drivers/media/platform/coda/coda-common.c10
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/omap/omap_vout.c15
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c68
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c6
-rw-r--r--drivers/memory/atmel-ebi.c37
-rw-r--r--drivers/memory/emif.h4
-rw-r--r--drivers/memory/tegra/mc.c34
-rw-r--r--drivers/memory/tegra/mc.h2
-rw-r--r--drivers/memory/tegra/tegra114.c4
-rw-r--r--drivers/memory/tegra/tegra124-emc.c1
-rw-r--r--drivers/memory/tegra/tegra124.c4
-rw-r--r--drivers/memory/tegra/tegra20.c28
-rw-r--r--drivers/memory/tegra/tegra210.c2
-rw-r--r--drivers/memory/tegra/tegra30.c4
-rw-r--r--drivers/memory/ti-emif-pm.c3
-rw-r--r--drivers/memory/ti-emif-sram-pm.S41
-rw-r--r--drivers/mfd/Kconfig99
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c211
-rw-r--r--drivers/mfd/atmel-hlcdc.c1
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x.c16
-rw-r--r--drivers/mfd/cros_ec.c39
-rw-r--r--drivers/mfd/cros_ec_dev.c36
-rw-r--r--drivers/mfd/cs47l35-tables.c2
-rw-r--r--drivers/mfd/cs47l90-tables.c2
-rw-r--r--drivers/mfd/da9063-core.c28
-rw-r--r--drivers/mfd/da9063-i2c.c10
-rw-r--r--drivers/mfd/da9063-irq.c10
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel-lpss.c4
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c10
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c1
-rw-r--r--drivers/mfd/max77620.c87
-rw-r--r--drivers/mfd/max77650.c232
-rw-r--r--drivers/mfd/mfd-core.c13
-rw-r--r--drivers/mfd/rk808.c9
-rw-r--r--drivers/mfd/sec-core.c59
-rw-r--r--drivers/mfd/sec-irq.c3
-rw-r--r--drivers/mfd/ssbi.c6
-rw-r--r--drivers/mfd/stmfx.c545
-rw-r--r--drivers/mfd/sun6i-prcm.c3
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/t7l66xb.c12
-rw-r--r--drivers/mfd/tc6387xb.c12
-rw-r--r--drivers/mfd/tc6393xb.c23
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/twl6040.c13
-rw-r--r--drivers/misc/Kconfig24
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/genwqe/card_utils.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c18
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c6
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c1
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mtd/Kconfig20
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/afs.c266
-rw-r--r--drivers/mtd/bcm63xxpart.c163
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c2
-rw-r--r--drivers/mtd/chips/cfi_util.c6
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c1
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/maps/physmap-gemini.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c8
-rw-r--r--drivers/mtd/mtdpart.c2
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/core.c34
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c5
-rw-r--r--drivers/mtd/nand/onenand/onenand_bbt.c3
-rw-r--r--drivers/mtd/nand/raw/Kconfig393
-rw-r--r--drivers/mtd/nand/raw/Makefile9
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c127
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c5
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.h6
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c7
-rw-r--r--drivers/mtd/nand/raw/denali.c1152
-rw-r--r--drivers/mtd/nand/raw/denali.h117
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c98
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c38
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c201
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c4
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig50
-rw-r--r--drivers/mtd/nand/raw/ingenic/Makefile7
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c166
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.h83
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand.c530
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4725b_bch.c295
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4740_ecc.c197
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4740_nand.c (renamed from drivers/mtd/nand/raw/jz4740_nand.c)7
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4780_bch.c (renamed from drivers/mtd/nand/raw/jz4780_bch.c)182
-rw-r--r--drivers/mtd/nand/raw/internals.h3
-rw-r--r--drivers/mtd/nand/raw/jz4780_bch.h43
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c415
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c51
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c30
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/raw/nand_amd.c19
-rw-r--r--drivers/mtd/nand/raw/nand_base.c324
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c73
-rw-r--r--drivers/mtd/nand/raw/nand_esmt.c19
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c94
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c27
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c16
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c35
-rw-r--r--drivers/mtd/nand/raw/nand_samsung.c46
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c21
-rw-r--r--drivers/mtd/nand/raw/nandsim.c144
-rw-r--r--drivers/mtd/nand/raw/nuc900_nand.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c4
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c8
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c13
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c90
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c8
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c5
-rw-r--r--drivers/mtd/nand/spi/core.c169
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c8
-rw-r--r--drivers/mtd/nand/spi/macronix.c4
-rw-r--r--drivers/mtd/nand/spi/micron.c2
-rw-r--r--drivers/mtd/nand/spi/toshiba.c12
-rw-r--r--drivers/mtd/nand/spi/winbond.c4
-rw-r--r--drivers/mtd/parsers/Kconfig27
-rw-r--r--drivers/mtd/parsers/Makefile2
-rw-r--r--drivers/mtd/parsers/afs.c410
-rw-r--r--drivers/mtd/parsers/parser_imagetag.c222
-rw-r--r--drivers/mtd/sm_ftl.c12
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c8
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c10
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c16
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c62
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c14
-rw-r--r--drivers/net/ieee802154/ca8210.c1
-rw-r--r--drivers/net/phy/mdio-mux-meson-g12a.c2
-rw-r--r--drivers/net/phy/realtek.c16
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c2
-rw-r--r--drivers/nvdimm/label.c29
-rw-r--r--drivers/nvdimm/namespace_devs.c15
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvme/host/core.c79
-rw-r--r--drivers/nvme/host/fabrics.c4
-rw-r--r--drivers/nvme/host/fc.c14
-rw-r--r--drivers/nvme/host/lightnvm.c1
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/pci.c4
-rw-r--r--drivers/nvme/host/rdma.c34
-rw-r--r--drivers/nvme/host/trace.h1
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c10
-rw-r--r--drivers/of/of_net.c34
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/bus.c5
-rw-r--r--drivers/pci/controller/dwc/Kconfig29
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c3
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c144
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c926
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c93
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c55
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c157
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c64
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h26
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c13
-rw-r--r--drivers/pci/controller/pci-host-generic.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c23
-rw-r--r--drivers/pci/controller/pci-tegra.c37
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/controller/pcie-iproc.c98
-rw-r--r--drivers/pci/controller/pcie-mediatek.c51
-rw-r--r--drivers/pci/controller/pcie-rcar.c85
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c1
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c9
-rw-r--r--drivers/pci/controller/pcie-xilinx.c12
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c10
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c10
-rw-r--r--drivers/pci/hotplug/pciehp.h31
-rw-r--r--drivers/pci/hotplug/pciehp_core.c18
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c17
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c3
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/of.c58
-rw-r--r--drivers/pci/p2pdma.c38
-rw-r--r--drivers/pci/pci-acpi.c183
-rw-r--r--drivers/pci/pci-stub.c10
-rw-r--r--drivers/pci/pci-sysfs.c3
-rw-r--r--drivers/pci/pci.c344
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer.c30
-rw-r--r--drivers/pci/pcie/aer_inject.c20
-rw-r--r--drivers/pci/pcie/aspm.c47
-rw-r--r--drivers/pci/pcie/bw_notification.c14
-rw-r--r--drivers/pci/pcie/dpc.c37
-rw-r--r--drivers/pci/pcie/pme.c10
-rw-r--r--drivers/pci/probe.c230
-rw-r--r--drivers/pci/proc.c1
-rw-r--r--drivers/pci/quirks.c92
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-bus.c526
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/switch/switchtec.c42
-rw-r--r--drivers/pci/xen-pcifront.c9
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pinctrl/Kconfig14
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c110
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c819
-rw-r--r--drivers/platform/chrome/Kconfig24
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c2
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c74
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c21
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c258
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c80
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c124
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h51
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c262
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c89
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c53
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/mellanox/Kconfig12
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo-regs.h63
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c1281
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/alienware-wmi.c19
-rw-r--r--drivers/platform/x86/asus-wmi.c37
-rw-r--r--drivers/platform/x86/dell-laptop.c6
-rw-r--r--drivers/platform/x86/dell-rbtn.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c321
-rw-r--r--drivers/platform/x86/intel_mrfld_pwrbtn.c107
-rw-r--r--drivers/platform/x86/intel_pmc_core.c172
-rw-r--r--drivers/platform/x86/intel_pmc_core.h7
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c46
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c8
-rw-r--r--drivers/platform/x86/mlx-platform.c228
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c146
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c51
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c19
-rw-r--r--drivers/power/supply/Kconfig36
-rw-r--r--drivers/power/supply/Makefile5
-rw-r--r--drivers/power/supply/ab8500_bmdata.c1
-rw-r--r--drivers/power/supply/axp20x_usb_power.c179
-rw-r--r--drivers/power/supply/axp288_charger.c4
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c20
-rw-r--r--drivers/power/supply/bq27xxx_battery.c3
-rw-r--r--drivers/power/supply/charger-manager.c3
-rw-r--r--drivers/power/supply/cpcap-battery.c44
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/gpio-charger.c57
-rw-r--r--drivers/power/supply/ingenic-battery.c184
-rw-r--r--drivers/power/supply/lt3651-charger.c (renamed from drivers/power/supply/ltc3651-charger.c)123
-rw-r--r--drivers/power/supply/max14656_charger_detector.c27
-rw-r--r--drivers/power/supply/max77650-charger.c368
-rw-r--r--drivers/power/supply/olpc_battery.c171
-rw-r--r--drivers/power/supply/power_supply_core.c38
-rw-r--r--drivers/power/supply/power_supply_sysfs.c6
-rw-r--r--drivers/power/supply/ucs1002_power.c646
-rw-r--r--drivers/pps/clients/pps-gpio.c153
-rw-r--r--drivers/pwm/Kconfig16
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c11
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c2
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c449
-rw-r--r--drivers/pwm/pwm-imx27.c4
-rw-r--r--drivers/pwm/pwm-meson.c64
-rw-r--r--drivers/pwm/pwm-pca9685.c1
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/sysfs.c16
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/rapidio/rio_cm.c8
-rw-r--r--drivers/reset/reset-zynqmp.c8
-rw-r--r--drivers/rtc/rtc-omap.c49
-rw-r--r--drivers/rtc/rtc-wilco-ec.c63
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/cio/qdio_main.c19
-rw-r--r--drivers/s390/cio/trace.c1
-rw-r--r--drivers/s390/cio/trace.h23
-rw-r--r--drivers/s390/virtio/virtio_ccw.c52
-rw-r--r--drivers/sbus/char/oradax.c2
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/sh/intc/userimask.c2
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c160
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c43
-rw-r--r--drivers/soc/aspeed/Kconfig31
-rw-r--r--drivers/soc/aspeed/Makefile3
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c (renamed from drivers/misc/aspeed-lpc-ctrl.c)0
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c (renamed from drivers/misc/aspeed-lpc-snoop.c)0
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c (renamed from drivers/misc/aspeed-p2a-ctrl.c)0
-rw-r--r--drivers/soc/imx/Makefile1
-rw-r--r--drivers/soc/imx/gpc.c17
-rw-r--r--drivers/soc/imx/gpcv2.c43
-rw-r--r--drivers/soc/imx/soc-imx8.c115
-rw-r--r--drivers/soc/ixp4xx/Kconfig16
-rw-r--r--drivers/soc/ixp4xx/Makefile2
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c762
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c488
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c111
-rw-r--r--drivers/soc/qcom/cmd-db.c4
-rw-r--r--drivers/soc/qcom/qmi_interface.c7
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c21
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c2
-rw-r--r--drivers/soc/renesas/renesas-soc.c3
-rw-r--r--drivers/soc/rockchip/grf.c2
-rw-r--r--drivers/soc/tegra/pmc.c171
-rw-r--r--drivers/soc/ti/Kconfig5
-rw-r--r--drivers/soc/ti/pm33xx.c273
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c18
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c10
-rw-r--r--drivers/spi/spi-rockchip.c1
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c6
-rw-r--r--drivers/staging/gasket/gasket_page_table.c4
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c2
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c6
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2
-rw-r--r--drivers/staging/media/imx/imx-media.h3
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c2
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c14
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c3
-rw-r--r--drivers/staging/olpc_dcon/Kconfig1
-rw-r--r--drivers/tee/optee/core.c80
-rw-r--r--drivers/tee/tee_shm.c2
-rw-r--r--drivers/thermal/Kconfig19
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c8
-rw-r--r--drivers/thermal/cpu_cooling.c30
-rw-r--r--drivers/thermal/intel/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c13
-rw-r--r--drivers/thermal/of-thermal.c3
-rw-r--r--drivers/thermal/qcom/Kconfig1
-rw-r--r--drivers/thermal/qcom/Makefile4
-rw-r--r--drivers/thermal/qcom/tsens-8916.c105
-rw-r--r--drivers/thermal/qcom/tsens-8960.c84
-rw-r--r--drivers/thermal/qcom/tsens-common.c159
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c (renamed from drivers/thermal/qcom/tsens-8974.c)166
-rw-r--r--drivers/thermal/qcom/tsens-v1.c193
-rw-r--r--drivers/thermal/qcom/tsens-v2.c111
-rw-r--r--drivers/thermal/qcom/tsens.c100
-rw-r--r--drivers/thermal/qcom/tsens.h291
-rw-r--r--drivers/thermal/qoriq_thermal.c5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c51
-rw-r--r--drivers/thermal/rcar_thermal.c11
-rw-r--r--drivers/thermal/rockchip_thermal.c74
-rw-r--r--drivers/thermal/st/Kconfig22
-rw-r--r--drivers/thermal/st/stm_thermal.c6
-rw-r--r--drivers/thermal/tegra/Kconfig4
-rw-r--r--drivers/thermal/tegra/soctherm.c961
-rw-r--r--drivers/thermal/tegra/soctherm.h16
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c9
-rw-r--r--drivers/thermal/thermal_core.c80
-rw-r--r--drivers/thermal/thermal_mmio.c129
-rw-r--r--drivers/tty/hvc/hvc_riscv_sbi.c1
-rw-r--r--drivers/tty/sysrq.c6
-rw-r--r--drivers/usb/host/ohci-da8xx.c42
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/vfio/mdev/mdev_core.c36
-rw-r--r--drivers/vfio/mdev/mdev_private.h2
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c23
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c29
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c5
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c12
-rw-r--r--drivers/vfio/vfio.c59
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c142
-rw-r--r--drivers/vhost/scsi.c1
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/backlight/Kconfig35
-rw-r--r--drivers/video/backlight/lm3630a_bl.c153
-rw-r--r--drivers/video/backlight/pwm_bl.c15
-rw-r--r--drivers/video/fbdev/Kconfig309
-rw-r--r--drivers/video/fbdev/Makefile2
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.c251
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.h24
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c567
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.h17
-rw-r--r--drivers/video/fbdev/amba-clcd.c98
-rw-r--r--drivers/video/fbdev/atafb.c67
-rw-r--r--drivers/video/fbdev/atafb_iplan2p2.c23
-rw-r--r--drivers/video/fbdev/atafb_iplan2p4.c23
-rw-r--r--drivers/video/fbdev/atafb_iplan2p8.c23
-rw-r--r--drivers/video/fbdev/atafb_mfb.c23
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c116
-rw-r--r--drivers/video/fbdev/core/fbcmap.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c29
-rw-r--r--drivers/video/fbdev/core/modedb.c3
-rw-r--r--drivers/video/fbdev/fb-puv3.c2
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/imsttfb.c5
-rw-r--r--drivers/video/fbdev/macfb.c29
-rw-r--r--drivers/video/fbdev/mmp/Kconfig6
-rw-r--r--drivers/video/fbdev/mxsfb.c14
-rw-r--r--drivers/video/fbdev/nuc900fb.c2
-rw-r--r--drivers/video/fbdev/omap/Kconfig20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Kconfig18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig40
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Kconfig6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c6
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/s3c2410fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c6
-rw-r--r--drivers/video/fbdev/sm712.h12
-rw-r--r--drivers/video/fbdev/sm712fb.c243
-rw-r--r--drivers/video/fbdev/udlfb.c114
-rw-r--r--drivers/video/fbdev/uvesafb.c16
-rw-r--r--drivers/video/fbdev/vesafb.c4
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c31
-rw-r--r--drivers/virtio/virtio_ring.c28
-rw-r--r--drivers/watchdog/Kconfig161
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c43
-rw-r--r--drivers/watchdog/asm9260_wdt.c77
-rw-r--r--drivers/watchdog/aspeed_wdt.c25
-rw-r--r--drivers/watchdog/at91sam9_wdt.c4
-rw-r--r--drivers/watchdog/ath79_wdt.c4
-rw-r--r--drivers/watchdog/atlas7_wdt.c65
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm7038_wdt.c42
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c18
-rw-r--r--drivers/watchdog/bd70528_wdt.c290
-rw-r--r--drivers/watchdog/cadence_wdt.c90
-rw-r--r--drivers/watchdog/coh901327_wdt.c28
-rw-r--r--drivers/watchdog/da9052_wdt.c13
-rw-r--r--drivers/watchdog/da9055_wdt.c12
-rw-r--r--drivers/watchdog/da9062_wdt.c20
-rw-r--r--drivers/watchdog/da9063_wdt.c21
-rw-r--r--drivers/watchdog/davinci_wdt.c45
-rw-r--r--drivers/watchdog/digicolor_wdt.c4
-rw-r--r--drivers/watchdog/dw_wdt.c4
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c5
-rw-r--r--drivers/watchdog/ep93xx_wdt.c17
-rw-r--r--drivers/watchdog/f71808e_wdt.c18
-rw-r--r--drivers/watchdog/ftwdt010_wdt.c6
-rw-r--r--drivers/watchdog/gpio_wdt.c16
-rw-r--r--drivers/watchdog/hpwdt.c3
-rw-r--r--drivers/watchdog/i6300esb.c9
-rw-r--r--drivers/watchdog/iTCO_wdt.c13
-rw-r--r--drivers/watchdog/imgpdc_wdt.c95
-rw-r--r--drivers/watchdog/imx2_wdt.c8
-rw-r--r--drivers/watchdog/imx_sc_wdt.c175
-rw-r--r--drivers/watchdog/intel-mid_wdt.c22
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c20
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c9
-rw-r--r--drivers/watchdog/jz4740_wdt.c17
-rw-r--r--drivers/watchdog/kempld_wdt.c28
-rw-r--r--drivers/watchdog/lantiq_wdt.c4
-rw-r--r--drivers/watchdog/loongson1_wdt.c52
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c47
-rw-r--r--drivers/watchdog/machzwd.c3
-rw-r--r--drivers/watchdog/max63xx_wdt.c24
-rw-r--r--drivers/watchdog/max77620_wdt.c23
-rw-r--r--drivers/watchdog/mena21_wdt.c28
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c33
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c49
-rw-r--r--drivers/watchdog/meson_wdt.c19
-rw-r--r--drivers/watchdog/mlx_wdt.c14
-rw-r--r--drivers/watchdog/moxart_wdt.c20
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c20
-rw-r--r--drivers/watchdog/mt7621_wdt.c12
-rw-r--r--drivers/watchdog/mtk_wdt.c37
-rw-r--r--drivers/watchdog/ni903x_wdt.c4
-rw-r--r--drivers/watchdog/nic7018_wdt.c5
-rw-r--r--drivers/watchdog/npcm_wdt.c10
-rw-r--r--drivers/watchdog/nuc900_wdt.c4
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c62
-rw-r--r--drivers/watchdog/omap_wdt.c4
-rw-r--r--drivers/watchdog/orion_wdt.c14
-rw-r--r--drivers/watchdog/pic32-dmt.c50
-rw-r--r--drivers/watchdog/pic32-wdt.c62
-rw-r--r--drivers/watchdog/pm8916_wdt.c21
-rw-r--r--drivers/watchdog/pnx4008_wdt.c45
-rw-r--r--drivers/watchdog/qcom-wdt.c55
-rw-r--r--drivers/watchdog/renesas_wdt.c9
-rw-r--r--drivers/watchdog/rn5t618_wdt.c9
-rw-r--r--drivers/watchdog/rt2880_wdt.c32
-rw-r--r--drivers/watchdog/rtd119x_wdt.c47
-rw-r--r--drivers/watchdog/rza_wdt.c25
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/sama5d4_wdt.c39
-rw-r--r--drivers/watchdog/sb_wdog.c4
-rw-r--r--drivers/watchdog/sbsa_gwdt.c28
-rw-r--r--drivers/watchdog/shwdt.c4
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c28
-rw-r--r--drivers/watchdog/sp5100_tco.c4
-rw-r--r--drivers/watchdog/sprd_wdt.c42
-rw-r--r--drivers/watchdog/st_lpc_wdt.c53
-rw-r--r--drivers/watchdog/stm32_iwdg.c150
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c16
-rw-r--r--drivers/watchdog/stpmic1_wdt.c13
-rw-r--r--drivers/watchdog/sunxi_wdt.c19
-rw-r--r--drivers/watchdog/tangox_wdt.c41
-rw-r--r--drivers/watchdog/tegra_wdt.c30
-rw-r--r--drivers/watchdog/tqmx86_wdt.c14
-rw-r--r--drivers/watchdog/ts4800_wdt.c33
-rw-r--r--drivers/watchdog/ts72xx_wdt.c18
-rw-r--r--drivers/watchdog/twl4030_wdt.c22
-rw-r--r--drivers/watchdog/txx9wdt.c4
-rw-r--r--drivers/watchdog/uniphier_wdt.c2
-rw-r--r--drivers/watchdog/ux500_wdt.c17
-rw-r--r--drivers/watchdog/watchdog_core.c42
-rw-r--r--drivers/watchdog/wdat_wdt.c29
-rw-r--r--drivers/watchdog/wm831x_wdt.c19
-rw-r--r--drivers/watchdog/xen_wdt.c18
-rw-r--r--drivers/watchdog/ziirave_wdt.c6
-rw-r--r--drivers/watchdog/zx2967_wdt.c37
-rw-r--r--drivers/xen/gntdev.c19
-rw-r--r--drivers/xen/privcmd-buf.c8
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
978 files changed, 31559 insertions, 12012 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ddf598ae8b6b..c16f9460c4a2 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -17,6 +17,7 @@
#include <linux/clkdev.h>
#include <linux/acpi.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/pm.h>
#include "internal.h"
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index adbf7cbedf80..9058cb084b91 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1031,6 +1031,14 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
+static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
+{
+ struct acpi_iort_root_complex *pci_rc;
+
+ pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+ return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
+}
+
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
@@ -1066,6 +1074,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
+
+ if (!err && iort_pci_rc_supports_ats(node))
+ dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 8940054d6250..78c2653bf020 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -428,8 +428,10 @@ static ssize_t acpi_device_adr_show(struct device *dev,
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- return sprintf(buf, "0x%08x\n",
- (unsigned int)(acpi_dev->pnp.bus_address));
+ if (acpi_dev->pnp.bus_address > U32_MAX)
+ return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address);
+ else
+ return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index a4e8432fc2fb..b42be067fb83 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -52,6 +52,18 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#define AL_ECAM(table_id, rev, seg, ops) \
+ { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
+
+ AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops),
+
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 707aafc7c2aa..c36781a9b493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -145,6 +145,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
+ { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
static struct pci_osc_bit_struct pci_osc_control_bit[] = {
@@ -446,6 +447,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
* PCI domains, so we indicate this in _OSC support capabilities.
*/
support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ support |= OSC_PCI_HPX_TYPE_3_SUPPORT;
if (pci_ext_cfg_avail())
support |= OSC_PCI_EXT_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled())
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 403c4ff15349..e52f1238d2d6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
+ acpi_enable_wakeup_devices(ACPI_STATE_S0);
+
/* Change the configuration of GPEs to avoid spurious wakeup. */
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
@@ -1027,6 +1029,8 @@ static void acpi_s2idle_restore(void)
{
acpi_enable_all_runtime_gpes();
+ acpi_disable_wakeup_devices(ACPI_STATE_S0);
+
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index cc6d06c1b2c7..db271b705529 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,7 +44,7 @@
#include <linux/ktime.h>
#include <linux/platform_data/dma-ep93xx.h>
-#include <mach/platform.h>
+#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
#define DRV_VERSION "1.0"
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 59b2317acea9..3495e1733a8e 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -909,7 +909,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
host = ata_host_alloc(dev, 1);
if (!host) {
- dev_err(dev, "ata_host_alloc failed\n");
ret = -ENOMEM;
goto err_pm_put;
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 668139cfa664..cc37511de866 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -548,11 +548,18 @@ ssize_t __weak cpu_show_l1tf(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -560,6 +567,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
+ &dev_attr_mds.attr,
NULL
};
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index e49028a60429..f180427e48f4 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -231,13 +231,14 @@ static bool pages_correctly_probed(unsigned long start_pfn)
* OK to have direct references to sparsemem variables in here.
*/
static int
-memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
+memory_block_action(unsigned long start_section_nr, unsigned long action,
+ int online_type)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
int ret;
- start_pfn = section_nr_to_pfn(phys_index);
+ start_pfn = section_nr_to_pfn(start_section_nr);
switch (action) {
case MEM_ONLINE:
@@ -251,7 +252,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
- "%ld\n", __func__, phys_index, action, action);
+ "%ld\n", __func__, start_section_nr, action, action);
ret = -EINVAL;
}
@@ -733,16 +734,18 @@ unregister_memory(struct memory_block *memory)
{
BUG_ON(memory->dev.bus != &memory_subsys);
- /* drop the ref. we got in remove_memory_section() */
+ /* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
}
-static int remove_memory_section(unsigned long node_id,
- struct mem_section *section, int phys_device)
+void unregister_memory_section(struct mem_section *section)
{
struct memory_block *mem;
+ if (WARN_ON_ONCE(!present_section(section)))
+ return;
+
mutex_lock(&mem_sysfs_mutex);
/*
@@ -763,15 +766,6 @@ static int remove_memory_section(unsigned long node_id,
out_unlock:
mutex_unlock(&mem_sysfs_mutex);
- return 0;
-}
-
-int unregister_memory_section(struct mem_section *section)
-{
- if (!present_section(section))
- return -EINVAL;
-
- return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7a6aa2318915..33c30c1e6a30 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -128,6 +128,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -515,7 +516,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
* (1) The domain is configured as always on.
* (2) When the domain has a subdomain being powered on.
*/
- if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
+ if (genpd_is_always_on(genpd) ||
+ genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
@@ -1812,7 +1815,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
}
/* Always-on domains must be powered on at initialization. */
- if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+ !genpd_status_on(genpd))
return -EINVAL;
if (genpd_is_cpu_domain(genpd) &&
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 17defbf4f332..2da615b45b31 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -153,6 +153,12 @@ static void brd_free_pages(struct brd_device *brd)
pos++;
/*
+ * It takes 3.4 seconds to remove 80GiB ramdisk.
+ * So, we need cond_resched to avoid stalling the CPU.
+ */
+ cond_resched();
+
+ /*
* This assumes radix_tree_gang_lookup always returns as
* many pages as possible. If the radix-tree code changes,
* so will this have to.
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2210c1b9491b..e5009a34f9c2 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -934,7 +934,7 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
struct rbd_client *rbdc;
int ret;
- mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock(&client_mutex);
rbdc = rbd_client_find(ceph_opts);
if (rbdc) {
ceph_destroy_options(ceph_opts);
@@ -1326,7 +1326,7 @@ static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
zero_bvecs(&obj_req->bvec_pos, off, bytes);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -1581,7 +1581,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
kfree(obj_request->bvec_pos.bvecs);
break;
default:
- rbd_assert(0);
+ BUG();
}
kfree(obj_request->img_extents);
@@ -1781,7 +1781,7 @@ static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2036,7 +2036,7 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req)
ret = rbd_obj_setup_zeroout(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
if (ret < 0)
return ret;
@@ -2383,7 +2383,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
} else {
ret = rbd_img_fill_from_bvecs(child_img_req,
@@ -2515,7 +2515,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
num_osd_ops += count_zeroout_ops(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
@@ -2542,7 +2542,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
__rbd_obj_setup_zeroout(obj_req, which);
break;
default:
- rbd_assert(0);
+ BUG();
}
ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
@@ -3842,8 +3842,12 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- rbd_assert(op_type == OBJ_OP_READ ||
- rbd_dev->spec->snap_id == CEPH_NOSNAP);
+ if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
+ rbd_warn(rbd_dev, "%s on read-only snapshot",
+ obj_op_name(op_type));
+ result = -EIO;
+ goto err;
+ }
/*
* Quit early if the mapped snapshot no longer exists. It's
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index 084ae286fa23..ac58142301f4 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -12,28 +12,38 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
+struct tegra_aconnect {
+ struct clk *ape_clk;
+ struct clk *apb2ape_clk;
+};
+
static int tegra_aconnect_probe(struct platform_device *pdev)
{
- int ret;
+ struct tegra_aconnect *aconnect;
if (!pdev->dev.of_node)
return -EINVAL;
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
+ aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect),
+ GFP_KERNEL);
+ if (!aconnect)
+ return -ENOMEM;
- ret = of_pm_clk_add_clk(&pdev->dev, "ape");
- if (ret)
- goto clk_destroy;
+ aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape");
+ if (IS_ERR(aconnect->ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve ape clock\n");
+ return PTR_ERR(aconnect->ape_clk);
+ }
- ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
- if (ret)
- goto clk_destroy;
+ aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape");
+ if (IS_ERR(aconnect->apb2ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n");
+ return PTR_ERR(aconnect->apb2ape_clk);
+ }
+ dev_set_drvdata(&pdev->dev, aconnect);
pm_runtime_enable(&pdev->dev);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
@@ -41,35 +51,51 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
return 0;
-
-clk_destroy:
- pm_clk_destroy(&pdev->dev);
-
- return ret;
}
static int tegra_aconnect_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
- pm_clk_destroy(&pdev->dev);
-
return 0;
}
static int tegra_aconnect_runtime_resume(struct device *dev)
{
- return pm_clk_resume(dev);
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(aconnect->ape_clk);
+ if (ret) {
+ dev_err(dev, "ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(aconnect->apb2ape_clk);
+ if (ret) {
+ clk_disable_unprepare(aconnect->ape_clk);
+ dev_err(dev, "apb2ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
static int tegra_aconnect_runtime_suspend(struct device *dev)
{
- return pm_clk_suspend(dev);
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aconnect->ape_clk);
+ clk_disable_unprepare(aconnect->apb2ape_clk);
+
+ return 0;
}
static const struct dev_pm_ops tegra_aconnect_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
tegra_aconnect_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
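For context, the dev_pm_ops table above only takes effect once it is hooked into the platform driver; that registration sits outside the hunks shown here. A minimal sketch of the usual wiring (illustrative only, not taken from the patch):

    static struct platform_driver tegra_aconnect_driver = {
        .probe = tegra_aconnect_probe,
        .remove = tegra_aconnect_remove,
        .driver = {
            .name = "tegra-aconnect",
            .of_match_table = tegra_aconnect_of_match,
            .pm = &tegra_aconnect_pm_ops,
        },
    };
    module_platform_driver(tegra_aconnect_driver);

Because SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() points at pm_runtime_force_suspend()/pm_runtime_force_resume(), the clock handling in the runtime PM callbacks above is reused for system suspend and resume.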
static const struct of_device_id tegra_aconnect_of_match[] = {
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index d299ec79e4c3..308475ed4b32 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -47,7 +47,10 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
-static const char * const clock_names[SYSC_ICK + 1] = { "fck", "ick", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
@@ -75,6 +78,7 @@ struct sysc {
u32 module_size;
void __iomem *module_va;
int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
struct clk **clocks;
const char **clock_roles;
int nr_clocks;
@@ -94,7 +98,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
-void sysc_write(struct sysc *ddata, int offset, u32 value)
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
writel_relaxed(value, ddata->module_va + offset);
}
@@ -128,6 +132,81 @@ static u32 sysc_read_revision(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+ cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
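As the in-code comment notes, the clk_lookup registered with clkdev_add() is keyed by the interconnect target module's device name plus the con_id, so a later clk_get() against the module device resolves to the child's clock. A minimal sketch of that lookup path (illustrative only; "opt0" is just an example con_id, not taken from the patch):

    struct clk *opt;

    /* Resolved through the clk_lookup added above (dev_id + con_id) */
    opt = clk_get(ddata->dev, "opt0");
    if (!IS_ERR(opt))
        clk_put(opt);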
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
int error, i, index = -ENODEV;
@@ -199,6 +278,12 @@ static int sysc_get_clocks(struct sysc *ddata)
if (ddata->nr_clocks < 1)
return 0;
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
dev_err(ddata->dev, "too many clocks for %pOF\n", np);
@@ -231,39 +316,125 @@ static int sysc_get_clocks(struct sysc *ddata)
return 0;
}
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
/**
- * sysc_init_resets - reset module on init
+ * sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
*
- * A module can have both OCP softreset control and external rstctrl.
- * If more complicated rstctrl resets are needed, please handle these
- * directly from the child device driver and map only the module reset
- * for the parent interconnect target module device.
- *
- * Automatic reset of the module on init can be skipped with the
- * "ti,no-reset-on-init" device tree property.
+ * See sysc_rstctrl_reset_deassert().
*/
static int sysc_init_resets(struct sysc *ddata)
{
- int error;
-
ddata->rsts =
devm_reset_control_array_get_optional_exclusive(ddata->dev);
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
- goto deassert;
-
- error = reset_control_assert(ddata->rsts);
- if (error)
- return error;
-
-deassert:
- error = reset_control_deassert(ddata->rsts);
- if (error)
- return error;
-
return 0;
}
@@ -622,91 +793,239 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
-static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+
+static int sysc_enable_module(struct device *dev)
{
- struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int error = 0, i;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
- if (!ddata->enabled)
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/arm/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
return 0;
- if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return -EINVAL;
+ }
- if (!pdata->idle_module)
- return -ENODEV;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- error = pdata->idle_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not idle: %i\n",
- __func__, error);
+ return 0;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
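The force-idle test above needs the BIT() form: force idle is mode 0 in the SYSCONFIG idle-mode encoding, so masking with the raw constant can never match. A tiny illustration (assuming SYSC_IDLE_FORCE is defined as 0, matching that encoding):

    u32 idlemodes = BIT(SYSC_IDLE_FORCE);

    /* idlemodes & SYSC_IDLE_FORCE       -> idlemodes & 0, always false */
    /* idlemodes & BIT(SYSC_IDLE_FORCE)  -> idlemodes & 0x1, true here  */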
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
- goto idled;
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/arm/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
}
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ return 0;
- clk_disable(ddata->clocks[i]);
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
}
-idled:
- ddata->enabled = false;
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- return error;
+ return 0;
}
-static int __maybe_unused sysc_runtime_resume(struct device *dev)
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
struct sysc *ddata;
- int error = 0, i;
+ int error = 0;
ddata = dev_get_drvdata(dev);
- if (ddata->enabled)
+ if (!ddata->enabled)
return 0;
if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ return error;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ return error;
+ }
- if (!pdata->enable_module)
- return -ENODEV;
+ sysc_disable_main_clocks(ddata);
- error = pdata->enable_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not enable: %i\n",
- __func__, error);
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
- goto awake;
- }
+ ddata->enabled = false;
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ return error;
+}
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
- error = clk_enable(ddata->clocks[i]);
+ if (ddata->enabled)
+ return 0;
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
}
-awake:
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
ddata->enabled = true;
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
return error;
}
@@ -788,12 +1107,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -805,6 +1129,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
0xffff00f0, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0x00001401, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
@@ -853,6 +1178,42 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
#endif
};
+/*
+ * Early quirks based only on the module base and register offsets; these
+ * are needed before the module revision can be read
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset >= 0 &&
+ q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset >= 0 &&
+ q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset >= 0 &&
+ q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
@@ -885,6 +1246,55 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!ddata->legacy_mode || !pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/**
+ * sysc_rstctrl_reset_deassert - deassert rstctrl reset
+ * @ddata: device driver data
+ * @reset: reset before deassert
+ *
+ * A module can have both OCP softreset control and external rstctrl.
+ * If more complicated rstctrl resets are needed, please handle these
+ * directly from the child device driver and map only the module reset
+ * for the parent interconnect target module device.
+ *
+ * Automatic reset of the module on init can be skipped with the
+ * "ti,no-reset-on-init" device tree property.
+ */
+static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
+{
+ int error;
+
+ if (!ddata->rsts)
+ return 0;
+
+ if (reset) {
+ error = reset_control_assert(ddata->rsts);
+ if (error)
+ return error;
+ }
+
+ return reset_control_deassert(ddata->rsts);
+}
+
static int sysc_reset(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSCONFIG];
@@ -915,38 +1325,58 @@ static int sysc_reset(struct sysc *ddata)
100, MAX_MODULE_SOFTRESET_WAIT);
}
-/* At this point the module is configured enough to read the revision */
+/*
+ * At this point the module is configured enough to read the revision, but
+ * it may not yet be configured enough to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
static int sysc_init_module(struct sysc *ddata)
{
- int error;
+ int error = 0;
+ bool manage_clocks = true;
+ bool reset = true;
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
- ddata->revision = sysc_read_revision(ddata);
- goto rev_quirks;
- }
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ reset = false;
- error = pm_runtime_get_sync(ddata->dev);
- if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
+ error = sysc_rstctrl_reset_deassert(ddata, reset);
+ if (error)
+ return error;
- return 0;
- }
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
+ manage_clocks = false;
- error = sysc_reset(ddata);
- if (error) {
- dev_err(ddata->dev, "Reset failed with %d\n", error);
- pm_runtime_put_sync(ddata->dev);
+ if (manage_clocks) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
- return error;
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
}
ddata->revision = sysc_read_revision(ddata);
- pm_runtime_put_sync(ddata->dev);
-
-rev_quirks:
sysc_init_revision_quirks(ddata);
- return 0;
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+err_main_clocks:
+ if (manage_clocks)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (manage_clocks)
+ sysc_disable_opt_clocks(ddata);
+
+ return error;
}
static int sysc_init_sysc_mask(struct sysc *ddata)
@@ -1208,7 +1638,7 @@ static int sysc_child_resume_noirq(struct device *dev)
}
#endif
-struct dev_pm_domain sysc_child_pm_domain = {
+static struct dev_pm_domain sysc_child_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
sysc_child_runtime_resume,
@@ -1281,6 +1711,8 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
.mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
{ .name = "ti,no-reset-on-init",
.mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -1331,6 +1763,9 @@ static void sysc_unprepare(struct sysc *ddata)
{
int i;
+ if (!ddata->clocks)
+ return;
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (!IS_ERR_OR_NULL(ddata->clocks[i]))
clk_unprepare(ddata->clocks[i]);
@@ -1576,28 +2011,26 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
- struct ti_sysc_module_data mdata;
- int error = 0;
+ struct ti_sysc_module_data *mdata;
if (!pdata || !ddata->legacy_mode)
return 0;
- mdata.name = ddata->legacy_mode;
- mdata.module_pa = ddata->module_pa;
- mdata.module_size = ddata->module_size;
- mdata.offsets = ddata->offsets;
- mdata.nr_offsets = SYSC_MAX_REGS;
- mdata.cap = ddata->cap;
- mdata.cfg = &ddata->cfg;
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
- if (!pdata->init_module)
- return -ENODEV;
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
- error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
- if (error == -EEXIST)
- error = 0;
+ ddata->mdata = mdata;
- return error;
+ return 0;
}
static int sysc_init_match(struct sysc *ddata)
@@ -1651,10 +2084,6 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
- error = sysc_get_clocks(ddata);
- if (error)
- return error;
-
error = sysc_map_and_check_registers(ddata);
if (error)
goto unprepare;
@@ -1675,15 +2104,21 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
error = sysc_init_resets(ddata);
if (error)
return error;
- pm_runtime_enable(ddata->dev);
error = sysc_init_module(ddata);
if (error)
goto unprepare;
+ pm_runtime_enable(ddata->dev);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 02d3bcd6216c..71c2e9519ca8 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of.h>
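This and the remaining clk hunks in the patch are mechanical: each driver that accesses MMIO registers now includes <linux/io.h> directly instead of relying on it being pulled in through other headers. A minimal sketch of the kind of accessor that needs it (illustrative only; the function name is made up):

    #include <linux/io.h>

    static u32 example_pll_read(void __iomem *base, unsigned int reg)
    {
        /* readl()/writel() are declared via <linux/io.h> */
        return readl(base + reg);
    }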
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index c68dada97316..aba787b2e771 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 2a2c7569336a..b6d07ca0164f 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/bcm2835-aux.h>
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 9fcae932e082..770bb01f523e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index eee64b9e5d10..cc3b1e1bc087 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -15,8 +15,9 @@
#include "clk-kona.h"
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
index 4d0be66aa6a8..eb14a5bc0507 100644
--- a/drivers/clk/berlin/berlin2-div.c
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 0b4b44a2579e..bccdfa00fd37 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 9b9db743df25..e9518d35f262 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index d1a97d971183..51f26619b6a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -10,8 +10,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index d81f1d2e9129..b1e556f20911 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index a47c2b600f20..97d1e8c35b71 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index 94470b4eadf4..e507aa958da9 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
index 0f7198191ea2..bf120bec59ae 100644
--- a/drivers/clk/davinci/pll-da850.c
+++ b/drivers/clk/davinci/pll-da850.c
@@ -11,6 +11,7 @@
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index d413ade95c99..376be03bb546 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index c7ae653c8a16..67c495b67c18 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -6,8 +6,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index e8b2c43b1bb8..89934bee7c9e 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 574fac1a169f..388bdb94f841 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -3,9 +3,10 @@
* Copyright 2018 NXP
*/
+#include <linux/clk-provider.h>
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/clk-provider.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 76b9eb15604e..fece503e3610 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
index e63188eb08ac..6e93284c397b 100644
--- a/drivers/clk/imx/clk-imx21.c
+++ b/drivers/clk/imx/clk-imx21.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx21-clock.h>
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index 0a0ab95d16fe..a3753067fc12 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -3,6 +3,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx27-clock.h>
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index fb567dcc2118..a03bbed662c6 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index d7e62c3620d3..8155b12cf0e1 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index 991bbe63f156..5d65f65c512e 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 510b685212d3..b80af61dc1f3 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -20,6 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index b86edd328249..25f7df028e67 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4740-cgu.h>
#include <asm/mach-jz4740/clock.h>
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index bf46a0df2004..dfce740c25a8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/clock/jz4770-cgu.h>
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 6427be117ff1..d03b7fcfd82b 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4780-cgu.h>
#include "cgu.h"
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
index 3466f7320b40..22a165ef65cf 100644
--- a/drivers/clk/loongson1/clk-loongson1c.c
+++ b/drivers/clk/loongson1/clk-loongson1c.c
@@ -9,6 +9,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <loongson1.h>
#include "clk.h"
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index c3b301463425..4680064f1951 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/mach-pic32/pic32.h>
#include <asm/traps.h>
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 9f734779be92..e6c05df2d47f 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 1f1cff428d78..5fc6d486a381 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index ee272d4d8c24..585a02e0b330 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 1fc84b0e72ee..818b175391fa 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 5969f620607a..f2e171a01fb4 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index f5bc8bd192b7..8b686da5577b 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 7524d19fe60b..7f67c1036ff9 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 42627bf8a09e..5326f77eb35a 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/pxa-clock.h>
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 2719c248c67b..cfed11c659d9 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 5967656c13cc..d8190f007a81 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 2913b4148157..da9fe3f032eb 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 3cda53a97f4e..fbc34beafc78 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index dc8ffc7c727a..5f25a70bc61c 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 97c72477cd54..7d042183aa37 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index b241f9ca3d71..cc90b11a9c25 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 30df0dc853f0..0201809bbd37 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 784b81e1ea7c..ba9f00dc9740 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -3,8 +3,9 @@
* Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
#include "clk.h"
#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index 601a77f1af78..68d23be18cbc 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index c3001980dbdc..3bf919b6c6e3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 5970a50671b9..8278a54db343 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 5ecf28854876..378420b8835a 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/rk3188-cru-common.h>
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 7af48184b022..7176003b5c7c 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 24baeb56a1b3..85907f31c63f 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index f12142d9cea2..9b03c1abf19c 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 7c4d242f19c1..d239bbc2fc77 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 5a628148f3f0..2322d712ba88 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c
index 089cb17925e5..6c051aa04e59 100644
--- a/drivers/clk/rockchip/clk-rv1108.c
+++ b/drivers/clk/rockchip/clk-rv1108.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 0ea8e8080d1a..d5fac5a8a3d7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index a5fddebbe530..3f80bcd46074 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -33,6 +33,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 9c95390d2d77..ce41f36a0e29 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 0e9a41a4cac8..facaad3c56a1 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 54066e6508d3..d2a68a792a21 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 8ae44b5db4c2..91db7894125d 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -4,6 +4,7 @@
// Author: Marek Szyprowski <m.szyprowski@samsung.com>
// Common Clock Framework support for Exynos5 power-domain dependent clocks
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f14139bcb0c1..c8265c4cbc4f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -12,6 +12,7 @@
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 1c4c7a3039f1..0c6782ceac48 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -13,7 +13,8 @@
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 82f8ae22fd34..0117e40c1d0a 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index dd1159050a5a..ce21b89d1eb1 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index f38f0e24e3b6..b2ea4dfb5b8c 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 1f6e47cd327d..9ad546a5f74c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -15,6 +15,7 @@
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index 0ec8bf7b4b28..6282ee2f361c 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -30,6 +30,7 @@
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_clk.h>
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index eee2d48ab656..54a464fa63e0 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017, Intel Corporation
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b58ddf..5c50e723ecae 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index c4d0b6f6abf2..4705eb544f01 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index c514d39760cb..23497f07ad89 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 129ebd2588fd..2bbfb3343311 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index be0deee70182..d3fc1f5bf396 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 3c32d7798f27..9d3f98962779 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index fa2c2dd77102..813e9bf73cbf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 609970c0b666..b494c4fe0b2c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 4b5f8f4e4ab8..a9c0c5406b85 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index c7bf814dfd2b..25bcf3fd2dfc 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 5f714b4d8ee4..be5920e8a9ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e71e2451c2e3..0f3df565c6c1 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index a22d11aa38ba..f9625f7b9ec2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index eada0e291859..ec64eb692ecf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 8936ef87652c..0e23583e4f58 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index dc9f0a365664..e748b8a6f3c5 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 302a18efd39f..6d407a8a61ee 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_div.h"
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index d1d168d4c4f0..1842603f8f11 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_frac.h"
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index cd069d5da215..9c81644e9dfe 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
index f9869f7353c0..b23410682088 100644
--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/sunxi-ng.h>
+#include <linux/io.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 0357349eb767..e17fb4c9fcfe 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 12e0783caee6..c2a672797a74 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mult.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 312664155a54..f9b409c3a89c 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mux.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 2485bda87a9a..50c7e6b1ba13 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nk.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 841840e35e61..aa5beaabc292 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkm.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index cbcdf664f336..53ec4fb59880 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkmp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 424d8635b053..e15413174aa7 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_frac.h"
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index 400c58ad72fd..0a4a6fd13f5b 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_phase.h"
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 3b3dc9bdf2b0..e510467ea24c 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_sdm.h"
diff --git a/drivers/clk/sunxi/clk-a10-mod1.c b/drivers/clk/sunxi/clk-a10-mod1.c
index e2819fa09637..9e6796a7b4c4 100644
--- a/drivers/clk/sunxi/clk-a10-mod1.c
+++ b/drivers/clk/sunxi/clk-a10-mod1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index d8eab90ae661..a709b6a551af 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index d9ea22ec4e25..d119b453dccd 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 3437f734c9bf..e6d639d9ea70 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index fc0278a1acc7..915954507d0a 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index a085c3bc127c..8130467d647a 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
index 9780fac6d029..bb2dc83fc697 100644
--- a/drivers/clk/sunxi/clk-sun4i-display.c
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-pll3.c b/drivers/clk/sunxi/clk-sun4i-pll3.c
index f66267e77d9c..c879d7e25ca0 100644
--- a/drivers/clk/sunxi/clk-sun4i-pll3.c
+++ b/drivers/clk/sunxi/clk-sun4i-pll3.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
index b6d29d1bedca..af8ca5019639 100644
--- a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index d5c31804ee54..5a7d4dd09e85 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index bee305bdddbe..bfbcd71b225d 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 56db89b6979f..0e924c9cbd5c 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-cpus.c b/drivers/clk/sunxi/clk-sun9i-cpus.c
index 4d5e14142e15..01255d827fc9 100644
--- a/drivers/clk/sunxi/clk-sun9i-cpus.c
+++ b/drivers/clk/sunxi/clk-sun9i-cpus.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index f00d8758ba24..da264d0f7f4b 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 892c29030b7b..f5b1c0067365 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 917fc27a33dd..7d15e0432ed4 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 93ecb538e59b..b7f763f0ecd8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/tegra/clk-periph-fixed.c b/drivers/clk/tegra/clk-periph-fixed.c
index c57dfb037b10..956f2215c733 100644
--- a/drivers/clk/tegra/clk-periph-fixed.c
+++ b/drivers/clk/tegra/clk-periph-fixed.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index 473d418533cb..a5cd3e31dbae 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/types.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index ffaf17f71860..6f2862eddad7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 0c210984765a..fdfb90058504 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -14,6 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ba17cc5bd04b..e0b8ed3a1e80 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index ed24f20f63c7..95e36ba64acc 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index c2b6bb814742..4fa0cd951d2e 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 19174835693b..5970edb6d334 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 8febd2431545..a11f93ecbf34 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -739,8 +739,8 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops)
- return -ENXIO;
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
ret = zynqmp_clk_setup(dev->of_node);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4b3d143f0f8a..48321488f0fd 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -69,6 +69,13 @@ config FTTMR010_TIMER
Enables support for the Faraday Technology timer block
FTTMR010.
+config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be6e0fbc7489..dba4eff880de 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
+obj-$(CONFIG_IXP4XX_TIMER) += timer-ixp4xx.o
obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
new file mode 100644
index 000000000000..5c2190b654cd
--- /dev/null
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4xx timer driver
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+/* Goes away with OF conversion */
+#include <linux/platform_data/timer-ixp4xx.h>
+
+/*
+ * Constants to make it easy to access Timer Control/Status registers
+ */
+#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
+#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
+#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
+#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
+#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
+#define IXP4XX_OSWT_OFFSET 0x14 /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET 0x18 /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET 0x1C /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
+
+/*
+ * Timer register values and bit definitions
+ */
+#define IXP4XX_OST_ENABLE 0x00000001
+#define IXP4XX_OST_ONE_SHOT 0x00000002
+/* Low order bits of reload value ignored */
+#define IXP4XX_OST_RELOAD_MASK 0x00000003
+#define IXP4XX_OST_DISABLED 0x00000000
+#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
+#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
+#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
+#define IXP4XX_OSST_TIMER_WDOG_PEND 0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET 0x00000010
+
+#define IXP4XX_WDT_KEY 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE 0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE 0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE 0x00000004
+
+struct ixp4xx_timer {
+ void __iomem *base;
+ unsigned int tick_rate;
+ u32 latch;
+ struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+ struct delay_timer delay_timer;
+#endif
+};
+
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct ixp4xx_timer *local_ixp4xx_timer;
+
+static inline struct ixp4xx_timer *
+to_ixp4xx_timer(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ixp4xx_timer, clkevt);
+}
+
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
+{
+ struct ixp4xx_timer *tmr = dev_id;
+ struct clock_event_device *evt = &tmr->clkevt;
+
+ /* Clear Pending Interrupt */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int ixp4xx_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ /* Keep enable/oneshot bits */
+ val &= IXP4XX_OST_RELOAD_MASK;
+ __raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_shutdown(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val &= ~IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+
+ __raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+/*
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp
+ * counter as a source of real clock ticks to account for missed jiffies.
+ */
+static __init int ixp4xx_timer_register(void __iomem *base,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ struct ixp4xx_timer *tmr;
+ int ret;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return -ENOMEM;
+ tmr->base = base;
+ tmr->tick_rate = timer_freq;
+
+ /*
+ * The timer register doesn't allow specifying the two least
+ * significant bits of the timeout value and assumes they are zero.
+ * So make sure the latch is the closest value with the two least
+ * significant bits unset.
+ */
+ tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
+ (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
+ * (IXP4XX_OST_RELOAD_MASK + 1);
+
+ local_ixp4xx_timer = tmr;
+
+ /* Reset/disable counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ /* Clear any pending interrupt on timer 1 */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ /* Reset time-stamp counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
+
+ clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
+
+ tmr->clkevt.name = "ixp4xx timer1";
+ tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tmr->clkevt.rating = 200;
+ tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
+ tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
+ tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
+ tmr->clkevt.tick_resume = ixp4xx_resume;
+ tmr->clkevt.set_next_event = ixp4xx_set_next_event;
+ tmr->clkevt.cpumask = cpumask_of(0);
+ tmr->clkevt.irq = timer_irq;
+ ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
+ IRQF_TIMER, "IXP4XX-TIMER1", tmr);
+ if (ret) {
+ pr_crit("no timer IRQ\n");
+ return -ENODEV;
+ }
+ clockevents_config_and_register(&tmr->clkevt, timer_freq,
+ 0xf, 0xfffffffe);
+
+ sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
+ * @timerbase: physical base of timer block
+ * @timer_irq: Linux IRQ number for the timer
+ * @timer_freq: Fixed frequency of the timer
+ */
+void __init ixp4xx_timer_setup(resource_size_t timerbase,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ void __iomem *base;
+
+ base = ioremap(timerbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return;
+ }
+ ixp4xx_timer_register(base, timer_irq, timer_freq);
+}
+EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
+
+#ifdef CONFIG_OF
+static __init int ixp4xx_of_timer_init(struct device_node *np)
+{
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ /* TODO: get some fixed clocks into the device tree */
+ ret = ixp4xx_timer_register(base, irq, 66666000);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+ return ret;
+}
+TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
+#endif
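The latch computed in ixp4xx_timer_register() above is deliberately rounded to a multiple of four so the two reserved low bits of the reload register stay clear. A minimal sketch of that arithmetic, assuming the 66.666 MHz rate hard-coded in the OF path and HZ = 100 (the helper name is invented for illustration):

/* Sketch: round the per-tick period so its two lowest bits end up clear. */
static u32 ixp4xx_example_latch(unsigned int timer_freq, unsigned int hz)
{
        /* e.g. 66666000 / 100 = 666660 ticks, already a multiple of 4 */
        return DIV_ROUND_CLOSEST(timer_freq, (IXP4XX_OST_RELOAD_MASK + 1) * hz) *
               (IXP4XX_OST_RELOAD_MASK + 1);
}

For a frequency that is not an exact multiple of 4 * HZ, the rounding trades at most two clock cycles per tick for a reload value the hardware will not silently truncate.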
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index db779b650fce..85ff958e01f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -340,11 +340,14 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs,
unsigned int state)
{
+ int cpu;
+
BUG_ON(irqs_disabled());
if (cpufreq_disabled())
return;
+ freqs->policy = policy;
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
@@ -364,10 +367,8 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
}
}
- for_each_cpu(freqs->cpu, policy->cpus) {
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_PRECHANGE, freqs);
- }
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
@@ -377,11 +378,11 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
cpumask_pr_args(policy->cpus));
- for_each_cpu(freqs->cpu, policy->cpus) {
- trace_cpu_frequency(freqs->new, freqs->cpu);
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_POSTCHANGE, freqs);
- }
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(freqs->new, cpu);
+
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
cpufreq_stats_record_transition(policy, freqs->new);
policy->cur = freqs->new;
@@ -618,50 +619,52 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
+static int cpufreq_parse_policy(char *str_governor,
+ struct cpufreq_policy *policy)
+{
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
+ }
+ return -EINVAL;
+}
+
/**
- * cpufreq_parse_governor - parse a governor string
+ * cpufreq_parse_governor - parse a governor string only for !setpolicy
*/
static int cpufreq_parse_governor(char *str_governor,
struct cpufreq_policy *policy)
{
- if (cpufreq_driver->setpolicy) {
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
- }
+ struct cpufreq_governor *t;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
- }
- } else {
- struct cpufreq_governor *t;
+ mutex_lock(&cpufreq_governor_mutex);
- mutex_lock(&cpufreq_governor_mutex);
+ t = find_governor(str_governor);
+ if (!t) {
+ int ret;
- t = find_governor(str_governor);
- if (!t) {
- int ret;
-
- mutex_unlock(&cpufreq_governor_mutex);
+ mutex_unlock(&cpufreq_governor_mutex);
- ret = request_module("cpufreq_%s", str_governor);
- if (ret)
- return -EINVAL;
+ ret = request_module("cpufreq_%s", str_governor);
+ if (ret)
+ return -EINVAL;
- mutex_lock(&cpufreq_governor_mutex);
+ mutex_lock(&cpufreq_governor_mutex);
- t = find_governor(str_governor);
- }
- if (t && !try_module_get(t->owner))
- t = NULL;
+ t = find_governor(str_governor);
+ }
+ if (t && !try_module_get(t->owner))
+ t = NULL;
- mutex_unlock(&cpufreq_governor_mutex);
+ mutex_unlock(&cpufreq_governor_mutex);
- if (t) {
- policy->governor = t;
- return 0;
- }
+ if (t) {
+ policy->governor = t;
+ return 0;
}
return -EINVAL;
@@ -783,8 +786,13 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy))
- return -EINVAL;
+ if (cpufreq_driver->setpolicy) {
+ if (cpufreq_parse_policy(str_governor, &new_policy))
+ return -EINVAL;
+ } else {
+ if (cpufreq_parse_governor(str_governor, &new_policy))
+ return -EINVAL;
+ }
ret = cpufreq_set_policy(policy, &new_policy);
@@ -1050,32 +1058,39 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *gov = NULL;
+ struct cpufreq_governor *gov = NULL, *def_gov = NULL;
struct cpufreq_policy new_policy;
memcpy(&new_policy, policy, sizeof(*policy));
- /* Update governor of new_policy to the governor used before hotplug */
- gov = find_governor(policy->last_governor);
- if (gov) {
- pr_debug("Restoring governor %s for cpu %d\n",
+ def_gov = cpufreq_default_governor();
+
+ if (has_target()) {
+ /*
+ * Update governor of new_policy to the governor used before
+ * hotplug
+ */
+ gov = find_governor(policy->last_governor);
+ if (gov) {
+ pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
+ } else {
+ if (!def_gov)
+ return -ENODATA;
+ gov = def_gov;
+ }
+ new_policy.governor = gov;
} else {
- gov = cpufreq_default_governor();
- if (!gov)
- return -ENODATA;
- }
-
- new_policy.governor = gov;
-
- /* Use the default policy if there is no last_policy. */
- if (cpufreq_driver->setpolicy) {
- if (policy->last_policy)
+ /* Use the default policy if there is no last_policy. */
+ if (policy->last_policy) {
new_policy.policy = policy->last_policy;
- else
- cpufreq_parse_governor(gov->name, &new_policy);
+ } else {
+ if (!def_gov)
+ return -ENODATA;
+ cpufreq_parse_policy(def_gov->name, &new_policy);
+ }
}
- /* set default policy */
+
return cpufreq_set_policy(policy, &new_policy);
}
@@ -1133,6 +1148,11 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ /*
+ * The entire policy object will be freed below, but the extra
+ * memory allocated for the kobject name needs to be freed by
+ * releasing the kobject.
+ */
kobject_put(&policy->kobj);
goto err_free_real_cpus;
}
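With the rework above, transition notifiers run once per policy rather than once per CPU, and the affected CPUs are reachable through the policy pointer now stored in the freqs structure (assuming the matching cpufreq_freqs::policy member added elsewhere in this series). A hedged consumer sketch, with all demo_* names invented:

static int demo_cpufreq_trans(struct notifier_block *nb,
                              unsigned long state, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (state != CPUFREQ_POSTCHANGE)
                return NOTIFY_DONE;

        /* One callback per policy; walk freqs->policy->cpus for per-CPU work. */
        pr_info("CPUs %*pbl now at %u kHz\n",
                cpumask_pr_args(freqs->policy->cpus), freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block demo_cpufreq_nb = {
        .notifier_call = demo_cpufreq_trans,
};

Registration stays the usual cpufreq_register_notifier(&demo_cpufreq_nb, CPUFREQ_TRANSITION_NOTIFIER).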
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index be89416e2358..21c9ce8526c0 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 3e23d4b2cce2..c0ece44f303b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -89,6 +89,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -2052,6 +2053,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -2070,6 +2072,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -2089,6 +2092,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* single-pass ipsec_esp descriptor */
@@ -3334,6 +3338,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -3356,6 +3361,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
};
@@ -3417,8 +3423,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 70af211d2d01..d290d6b41825 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -36,6 +36,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -1523,6 +1524,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1541,6 +1543,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1560,6 +1563,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -2433,8 +2437,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 33a4df6b81de..2b2980a8a9b9 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -42,6 +42,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -1480,7 +1481,7 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
- alg->setkey == aead_setkey);
+ !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -1641,6 +1642,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1659,6 +1661,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1678,6 +1681,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -2755,6 +2759,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2777,6 +2782,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index a4129a35a330..4da844e4b61d 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -22,7 +22,7 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
size_t len;
void *buf;
- for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
+ for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
/*
* make sure the scatterlist's page
* has a valid virtual memory mapping
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 044a69b526f7..1de2562d0982 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -213,7 +213,7 @@ static void caam_jr_dequeue(unsigned long devarg)
mb();
/* set done */
- wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);
+ wr_reg32(&jrp->rregs->outring_rmvd, 1);
jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
(JOBR_DEPTH - 1);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index c1fa1ec701d9..8591914d5c51 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -96,14 +96,6 @@ cpu_to_caam(16)
cpu_to_caam(32)
cpu_to_caam(64)
-static inline void wr_reg32_relaxed(void __iomem *reg, u32 data)
-{
- if (caam_little_end)
- writel_relaxed(data, reg);
- else
- writel_relaxed(cpu_to_be32(data), reg);
-}
-
static inline void wr_reg32(void __iomem *reg, u32 data)
{
if (caam_little_end)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 8a76fce22943..177f572b9589 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -200,17 +200,10 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
- int err = 0;
-
- spin_lock_bh(&dev->lock_chcr_dev);
if (dev->state == CHCR_DETACH)
- err = 1;
- else
- atomic_inc(&dev->inflight);
-
- spin_unlock_bh(&dev->lock_chcr_dev);
-
- return err;
+ return 1;
+ atomic_inc(&dev->inflight);
+ return 0;
}
static inline void chcr_dec_wrcount(struct chcr_dev *dev)
@@ -1101,8 +1094,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
+ ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 239b933d6df6..029a7354f541 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -243,15 +243,11 @@ static void chcr_detach_device(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev = &u_ctx->dev;
- spin_lock_bh(&dev->lock_chcr_dev);
if (dev->state == CHCR_DETACH) {
- spin_unlock_bh(&dev->lock_chcr_dev);
pr_debug("Detached Event received for already detach device\n");
return;
}
dev->state = CHCR_DETACH;
- spin_unlock_bh(&dev->lock_chcr_dev);
-
if (atomic_read(&dev->inflight) != 0) {
schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
wait_for_completion(&dev->detach_comp);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 2f60049361ef..f429aae72542 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -575,7 +575,8 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
netif_tx_stop_queue(q->txq);
q->q.stops++;
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr_mid |= FW_ULPTX_WR_DATA_F;
wr->wreq.flowid_len16 = htonl(wr_mid);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9bbde2f26cac..f5414b6dfb55 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -30,8 +30,8 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 5ef624fe3934..a59f338f520f 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,7 +23,6 @@ config DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
- depends on m # until we can kill DEV_DAX_PMEM_COMPAT
default DEV_DAX
help
Support raw access to persistent memory. Note that this
@@ -50,7 +49,7 @@ config DEV_DAX_KMEM
config DEV_DAX_PMEM_COMPAT
tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
- depends on DEV_DAX_PMEM
+ depends on m && DEV_DAX_PMEM=m
default DEV_DAX_PMEM
help
Older versions of the libdaxctl library expect to find all
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index e428468ab661..996d68ff992a 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -184,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
- vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -235,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
- vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index f71019ce0647..f9f51786d556 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -37,13 +37,13 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
devm_nsio_disable(dev, nsio);
/* reserve the metadata area, device-dax will reserve the data */
- pfn_sb = nd_pfn->pfn_sb;
+ pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
- dev_warn(dev, "could not reserve metadata\n");
+ dev_warn(dev, "could not reserve metadata\n");
return ERR_PTR(-EBUSY);
- }
+ }
rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
if (rc != 2)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3aa8733f832a..9bf06042619a 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -29,6 +29,7 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 47eb4d13ed5f..5e2e0348d460 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,8 +263,8 @@ config EDAC_PND2
micro-server but may appear on others in the future.
config EDAC_MPC85XX
- tristate "Freescale MPC83xx / MPC85xx"
- depends on FSL_SOC
+ bool "Freescale MPC83xx / MPC85xx"
+ depends on FSL_SOC && EDAC=y
help
Support for error detection and correction on the Freescale
MPC8349, MPC8560, MPC8540, MPC8548, T4240
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 13594ffadcb3..64922c8fa7e3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -679,22 +679,18 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
struct mem_ctl_info *edac_mc_find(int idx)
{
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
struct list_head *item;
mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx) {
- goto unlock;
- }
- break;
- }
+ if (mci->mc_idx == idx)
+ goto unlock;
}
+ mci = NULL;
unlock:
mutex_unlock(&mem_ctls_mutex);
return mci;
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 35e784cffc23..5414eb1306aa 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -107,19 +107,8 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
struct vm_area_struct *vma)
{
- unsigned long uaddr;
- int i, err;
-
- uaddr = vma->vm_start;
- for (i = 0; i < buffer->page_count; i++) {
- err = vm_insert_page(vma, uaddr, buffer->pages[i]);
- if (err)
- return err;
-
- uaddr += PAGE_SIZE;
- }
-
- return 0;
+ return vm_map_pages_zero(vma, buffer->pages,
+ buffer->page_count);
}
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 7b655f6156fb..11fda9eb2466 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -253,6 +253,22 @@ config TI_SCI_PROTOCOL
This protocol library is used by client drivers to use the features
provided by the system controller.
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
config HAVE_ARM_SMCCC
bool
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 9a3909a22682..3fa0b34eb72f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
obj-y += psci/
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8f952f2f1a29..b5bc4c7a8fab 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -654,9 +654,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
static int scmi_mailbox_check(struct device_node *np)
{
- struct of_phandle_args arg;
-
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
+ return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
@@ -798,7 +796,9 @@ static int scmi_probe(struct platform_device *pdev)
return -EINVAL;
}
- desc = of_match_device(scmi_of_match, dev)->data;
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 1b2e15b3c9ca..802c4ad8e8f9 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o
obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
new file mode 100644
index 000000000000..043833ad3c1a
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ *
+ * Implementation of the SCU IRQ functions using MU.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/mailbox_client.h>
+
+#define IMX_SC_IRQ_FUNC_ENABLE 1
+#define IMX_SC_IRQ_FUNC_STATUS 2
+#define IMX_SC_IRQ_NUM_GROUP 4
+
+static u32 mu_resource_id;
+
+struct imx_sc_msg_irq_get_status {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ u8 group;
+ u8 reserved;
+ } __packed req;
+ struct {
+ u32 status;
+ } resp;
+ } data;
+};
+
+struct imx_sc_msg_irq_enable {
+ struct imx_sc_rpc_msg hdr;
+ u32 mask;
+ u16 resource;
+ u8 group;
+ u8 enable;
+} __packed;
+
+static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
+static struct work_struct imx_sc_irq_work;
+static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
+
+int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_register_notifier);
+
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
+
+static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
+{
+ return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain,
+ status, (void *)group);
+}
+
+static void imx_scu_irq_work_handler(struct work_struct *work)
+{
+ struct imx_sc_msg_irq_get_status msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ u32 irq_status;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_STATUS;
+ hdr->size = 2;
+
+ msg.data.req.resource = mu_resource_id;
+ msg.data.req.group = i;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("get irq group %d status failed, ret %d\n",
+ i, ret);
+ return;
+ }
+
+ irq_status = msg.data.resp.status;
+ if (!irq_status)
+ continue;
+
+ imx_scu_irq_notifier_call_chain(irq_status, &i);
+ }
+}
+
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ struct imx_sc_msg_irq_enable msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
+ hdr->size = 3;
+
+ msg.resource = mu_resource_id;
+ msg.group = group;
+ msg.mask = mask;
+ msg.enable = enable;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret)
+ pr_err("enable irq failed, group %d, mask %d, ret %d\n",
+ group, mask, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_irq_group_enable);
+
+static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
+{
+ schedule_work(&imx_sc_irq_work);
+}
+
+int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ struct of_phandle_args spec;
+ struct mbox_client *cl;
+ struct mbox_chan *ch;
+ int ret = 0, i = 0;
+
+ ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
+ if (ret)
+ return ret;
+
+ cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->dev = dev;
+ cl->rx_callback = imx_scu_irq_callback;
+
+ /* SCU general IRQ uses general interrupt channel 3 */
+ ch = mbox_request_channel_byname(cl, "gip3");
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
+ devm_kfree(dev, cl);
+ return ret;
+ }
+
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec))
+ i = of_alias_get_id(spec.np, "mu");
+
+ /* use mu1 as the general MU irq channel if the lookup failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
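The new SCU IRQ layer is consumed through the notifier chain and the group-enable helper exported above (their declarations are assumed to live in linux/firmware/imx/sci.h, added elsewhere in this series). A sketch of a client, with the group/mask values and every my_* name invented for illustration:

static int my_scu_irq_notify(struct notifier_block *nb,
                             unsigned long status, void *group)
{
        pr_info("SCU irq group %d status 0x%lx\n", *(u8 *)group, status);
        return NOTIFY_OK;
}

static struct notifier_block my_scu_irq_nb = {
        .notifier_call = my_scu_irq_notify,
};

static int my_client_setup(void)
{
        int ret = imx_scu_irq_register_notifier(&my_scu_irq_nb);

        if (ret)
                return ret;

        /* group 0, lowest mask bit: purely illustrative values */
        return imx_scu_irq_group_enable(0, BIT(0), true);
}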
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 2bb1a19c413f..04a24a863d6e 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -246,6 +247,11 @@ static int imx_scu_probe(struct platform_device *pdev)
imx_sc_ipc_handle = sc_ipc;
+ ret = imx_scu_enable_general_irq_channel(dev);
+ if (ret)
+ dev_warn(dev,
+ "failed to enable general irq channel: %d\n", ret);
+
dev_info(dev, "NXP i.MX SCU Initialized\n");
return devm_of_platform_populate(dev);
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 39a94c7177fc..480cec69e2c9 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -74,7 +74,10 @@ struct imx_sc_pd_range {
char *name;
u32 rsrc;
u8 num;
+
+ /* add a domain index as name suffix, starting at start_from */
bool postfix;
+ u8 start_from;
};
struct imx_sc_pd_soc {
@@ -84,71 +87,75 @@ struct imx_sc_pd_soc {
static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LSIO SS */
- { "lsio-pwm", IMX_SC_R_PWM_0, 8, 1 },
- { "lsio-gpio", IMX_SC_R_GPIO_0, 8, 1 },
- { "lsio-gpt", IMX_SC_R_GPT_0, 5, 1 },
- { "lsio-kpp", IMX_SC_R_KPP, 1, 0 },
- { "lsio-fspi", IMX_SC_R_FSPI_0, 2, 1 },
- { "lsio-mu", IMX_SC_R_MU_0A, 14, 1 },
+ { "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
+ { "gpio", IMX_SC_R_GPIO_0, 8, true, 0 },
+ { "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
+ { "kpp", IMX_SC_R_KPP, 1, false, 0 },
+ { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
+ { "mu", IMX_SC_R_MU_0A, 14, true, 0 },
/* CONN SS */
- { "con-usb", IMX_SC_R_USB_0, 2, 1 },
- { "con-usb0phy", IMX_SC_R_USB_0_PHY, 1, 0 },
- { "con-usb2", IMX_SC_R_USB_2, 1, 0 },
- { "con-usb2phy", IMX_SC_R_USB_2_PHY, 1, 0 },
- { "con-sdhc", IMX_SC_R_SDHC_0, 3, 1 },
- { "con-enet", IMX_SC_R_ENET_0, 2, 1 },
- { "con-nand", IMX_SC_R_NAND, 1, 0 },
- { "con-mlb", IMX_SC_R_MLB_0, 1, 1 },
-
- /* Audio DMA SS */
- { "adma-audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, 0 },
- { "adma-audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, 0 },
- { "adma-audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, 0 },
- { "adma-dma0-ch", IMX_SC_R_DMA_0_CH0, 16, 1 },
- { "adma-dma1-ch", IMX_SC_R_DMA_1_CH0, 16, 1 },
- { "adma-dma2-ch", IMX_SC_R_DMA_2_CH0, 5, 1 },
- { "adma-asrc0", IMX_SC_R_ASRC_0, 1, 0 },
- { "adma-asrc1", IMX_SC_R_ASRC_1, 1, 0 },
- { "adma-esai0", IMX_SC_R_ESAI_0, 1, 0 },
- { "adma-spdif0", IMX_SC_R_SPDIF_0, 1, 0 },
- { "adma-sai", IMX_SC_R_SAI_0, 3, 1 },
- { "adma-amix", IMX_SC_R_AMIX, 1, 0 },
- { "adma-mqs0", IMX_SC_R_MQS_0, 1, 0 },
- { "adma-dsp", IMX_SC_R_DSP, 1, 0 },
- { "adma-dsp-ram", IMX_SC_R_DSP_RAM, 1, 0 },
- { "adma-can", IMX_SC_R_CAN_0, 3, 1 },
- { "adma-ftm", IMX_SC_R_FTM_0, 2, 1 },
- { "adma-lpi2c", IMX_SC_R_I2C_0, 4, 1 },
- { "adma-adc", IMX_SC_R_ADC_0, 1, 1 },
- { "adma-lcd", IMX_SC_R_LCD_0, 1, 1 },
- { "adma-lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, 1 },
- { "adma-lpuart", IMX_SC_R_UART_0, 4, 1 },
- { "adma-lpspi", IMX_SC_R_SPI_0, 4, 1 },
-
- /* VPU SS */
- { "vpu", IMX_SC_R_VPU, 1, 0 },
- { "vpu-pid", IMX_SC_R_VPU_PID0, 8, 1 },
- { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, 0 },
- { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, 0 },
+ { "usb", IMX_SC_R_USB_0, 2, true, 0 },
+ { "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
+ { "usb2", IMX_SC_R_USB_2, 1, false, 0 },
+ { "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
+ { "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
+ { "enet", IMX_SC_R_ENET_0, 2, true, 0 },
+ { "nand", IMX_SC_R_NAND, 1, false, 0 },
+ { "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
+
+ /* AUDIO SS */
+ { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
+ { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
+ { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+ { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
+ { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
+ { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+ { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
+ { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
+ { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
+ { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+ { "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+ { "amix", IMX_SC_R_AMIX, 1, false, 0 },
+ { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
+ { "dsp", IMX_SC_R_DSP, 1, false, 0 },
+ { "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 },
+
+ /* DMA SS */
+ { "can", IMX_SC_R_CAN_0, 3, true, 0 },
+ { "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
+ { "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
+ { "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+ { "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
+ { "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
+ { "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
+ { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+
+ /* VPU SS */
+ { "vpu", IMX_SC_R_VPU, 1, false, 0 },
+ { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
+ { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
+ { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
/* GPU SS */
- { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, 1 },
+ { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
/* HSIO SS */
- { "hsio-pcie-b", IMX_SC_R_PCIE_B, 1, 0 },
- { "hsio-serdes-1", IMX_SC_R_SERDES_1, 1, 0 },
- { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, 0 },
+ { "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
+ { "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
+ { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
+
+ /* MIPI SS */
+ { "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 },
+ { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
+ { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
- /* MIPI/LVDS SS */
- { "mipi0", IMX_SC_R_MIPI_0, 1, 0 },
- { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, 0 },
- { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, 1 },
- { "lvds0", IMX_SC_R_LVDS_0, 1, 0 },
+ /* LVDS SS */
+ { "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
/* DC SS */
- { "dc0", IMX_SC_R_DC_0, 1, 0 },
- { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, 1 },
+ { "dc0", IMX_SC_R_DC_0, 1, false, 0 },
+ { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
@@ -236,7 +243,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
if (pd_ranges->postfix)
snprintf(sc_pd->name, sizeof(sc_pd->name),
- "%s%i", pd_ranges->name, idx);
+ "%s%i", pd_ranges->name, pd_ranges->start_from + idx);
else
snprintf(sc_pd->name, sizeof(sc_pd->name),
"%s", pd_ranges->name);
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 000000000000..fd4999388ff1
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,176 @@
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM 0xfffffffc
+#define TF_CPU_PM_S3 0xffffffe3
+#define TF_CPU_PM_S2 0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
+#define TF_CPU_PM_S1 0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
+ asm volatile(
+ ".arch_extension sec\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+ cpu_boot_addr = boot_addr;
+ tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+ return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+ .set_cpu_boot_addr = tf_set_cpu_boot_addr,
+ .prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * we are not using version information for now since currently
+ * supported SMCs are compatible with all TF releases
+ */
+ register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+ struct device_node *node;
+ struct trusted_foundations_platform_data pdata;
+ int err;
+
+ node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+ if (!node)
+ return;
+
+ err = of_property_read_u32(node, "tlm,version-major",
+ &pdata.version_major);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-major property\n");
+ err = of_property_read_u32(node, "tlm,version-minor",
+ &pdata.version_minor);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-minor property\n");
+ register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
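
For orientation, platform code is not expected to call the tf_* helpers above directly; it goes through the generic ARM firmware_ops interface from <asm/firmware.h>. The fragment below is a minimal, hypothetical sketch (the function name and its cpu/boot_addr arguments are invented) of how the registered .set_cpu_boot_addr hook would typically be reached:

#include <asm/firmware.h>
#include <linux/firmware/trusted_foundations.h>

/*
 * Hypothetical platform code: register the ops from the DT node, then
 * hand a secondary CPU's boot address to the secure firmware.
 * call_firmware_op() returns -ENOSYS when no hook is registered, so
 * this degrades gracefully on non-TF hardware.
 */
static int example_boot_secondary(unsigned int cpu, unsigned long boot_addr)
{
	of_register_trusted_foundations();

	return call_firmware_op(set_cpu_boot_addr, cpu, boot_addr);
}
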
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 2771df6df379..c6d0724da4db 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -90,9 +90,6 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
int ret;
struct zynqmp_pm_query_data qdata = {0};
- if (!eemi_ops)
- return -ENXIO;
-
switch (pm_id) {
case PM_GET_API_VERSION:
ret = eemi_ops->get_api_version(&pm_api_version);
@@ -163,21 +160,14 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
strcpy(debugfs_buf, "");
- if (*off != 0 || len == 0)
+ if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
return -EINVAL;
- kern_buff = kzalloc(len, GFP_KERNEL);
- if (!kern_buff)
- return -ENOMEM;
-
+ kern_buff = memdup_user_nul(ptr, len);
+ if (IS_ERR(kern_buff))
+ return PTR_ERR(kern_buff);
tmp_buff = kern_buff;
- ret = strncpy_from_user(kern_buff, ptr, len);
- if (ret < 0) {
- ret = -EFAULT;
- goto err;
- }
-
/* Read the API name from a user request */
pm_api_req = strsep(&kern_buff, " ");
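
The hunk above replaces an open-coded kzalloc() plus strncpy_from_user() with memdup_user_nul(), which copies the user buffer and NUL-terminates it in one step, returning an ERR_PTR on failure. A minimal sketch of the same pattern in a generic debugfs write handler (the handler and its parsing step are hypothetical):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_debugfs_write(struct file *file, const char __user *ptr,
				     size_t len, loff_t *off)
{
	char *kern_buff;

	if (*off != 0 || len == 0 || len > PAGE_SIZE - 1)
		return -EINVAL;

	/* Copy 'len' bytes from userspace and append a terminating NUL */
	kern_buff = memdup_user_nul(ptr, len);
	if (IS_ERR(kern_buff))
		return PTR_ERR(kern_buff);

	/* ... parse kern_buff with strsep() etc. ... */

	kfree(kern_buff);
	return len;
}
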
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 98f936125643..fd3d83745208 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -24,6 +24,8 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -538,6 +540,49 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
}
/**
+ * zynqmp_pm_fpga_load - Perform the FPGA load
+ * @address: DMA address of the bitstream buffer
+ * @size: Size of the PL bitstream in bytes
+ * @flags: Bitstream type
+ *	-XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ *	-XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ *
+ * This function provides access to the pmufw to transfer
+ * the required bitstream into the PL.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+ upper_32_bits(address), size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_fpga_get_status - Read value from PCAP status register
+ * @value: Pointer to the returned PCAP status register value
+ *
+ * This function provides access to the pmufw to get the PCAP
+ * status.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
@@ -640,6 +685,8 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.request_node = zynqmp_pm_request_node,
.release_node = zynqmp_pm_release_node,
.set_requirement = zynqmp_pm_set_requirement,
+ .fpga_load = zynqmp_pm_fpga_load,
+ .fpga_get_status = zynqmp_pm_fpga_get_status,
};
/**
@@ -649,7 +696,11 @@ static const struct zynqmp_eemi_ops eemi_ops = {
*/
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
{
- return &eemi_ops;
+ if (eemi_ops_tbl)
+ return eemi_ops_tbl;
+ else
+ return ERR_PTR(-EPROBE_DEFER);
+
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
@@ -694,6 +745,9 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+ /* Assign eemi_ops_table */
+ eemi_ops_tbl = &eemi_ops;
+
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
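
Since zynqmp_pm_get_eemi_ops() now returns ERR_PTR(-EPROBE_DEFER) until the firmware driver has bound, callers that want probe deferral are expected to test the result with IS_ERR(). A hedged sketch of a hypothetical consumer probe (the driver itself is invented):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	/* Typically -EPROBE_DEFER until zynqmp_firmware_probe() has run */
	if (IS_ERR(eemi_ops))
		return PTR_ERR(eemi_ops);

	/* eemi_ops->query_data(), eemi_ops->fpga_load(), ... are usable now */
	return 0;
}
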
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index c20445b867ae..d892f3efcd76 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -204,4 +204,13 @@ config FPGA_DFL_PCI
To compile this as a module, choose M here.
+config FPGA_MGR_ZYNQMP_FPGA
+ tristate "Xilinx ZynqMP FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ FPGA manager driver support for Xilinx ZynqMP FPGAs.
+ This driver uses the processor configuration access port (PCAP)
+ to configure the programmable logic (PL) through the PS
+ on the ZynqMP SoC.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index c0dd4c82fbdb..312b9371742f 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index e18a786fc943..c438722bf4e1 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -102,7 +102,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
goto unlock_vm;
}
- pinned = get_user_pages_fast(region->user_addr, npages, 1,
+ pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
region->pages);
if (pinned < 0) {
ret = pinned;
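
The one-line change above follows get_user_pages_fast() switching from a write boolean to gup_flags; passing FOLL_WRITE keeps the previous writable-pin behaviour. A minimal sketch of the call pattern (the helper name is invented):

#include <linux/mm.h>

/* Pin npages user pages for writable DMA; returns 0 or a negative errno */
static int example_pin_user_buffer(unsigned long user_addr, int npages,
				   struct page **pages)
{
	int pinned = get_user_pages_fast(user_addr, npages, FOLL_WRITE, pages);

	if (pinned < 0)
		return pinned;

	if (pinned != npages) {
		/* Partial pin: drop what we got and report failure */
		while (pinned > 0)
			put_page(pages[--pinned]);
		return -EFAULT;
	}

	return 0;
}
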
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
new file mode 100644
index 000000000000..f7cbaadf49ab
--- /dev/null
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/* Constant Definitions */
+#define IXR_FPGA_DONE_MASK BIT(3)
+
+/**
+ * struct zynqmp_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @flags: Flags used to identify the bitfile type
+ */
+struct zynqmp_fpga_priv {
+ struct device *dev;
+ u32 flags;
+};
+
+static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct zynqmp_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ u32 eemi_flags = 0;
+ char *kbuf;
+ int ret;
+
+ if (!eemi_ops || !eemi_ops->fpga_load)
+ return -ENXIO;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+ wmb(); /* ensure all writes are done before initiating the FW call */
+
+ if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
+
+ ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ return 0;
+}
+
+static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ u32 status;
+
+ if (!eemi_ops || !eemi_ops->fpga_get_status)
+ return FPGA_MGR_STATE_UNKNOWN;
+
+ eemi_ops->fpga_get_status(&status);
+ if (status & IXR_FPGA_DONE_MASK)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops zynqmp_fpga_ops = {
+ .state = zynqmp_fpga_ops_state,
+ .write_init = zynqmp_fpga_ops_write_init,
+ .write = zynqmp_fpga_ops_write,
+ .write_complete = zynqmp_fpga_ops_write_complete,
+};
+
+static int zynqmp_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
+ &zynqmp_fpga_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mgr);
+
+ ret = fpga_mgr_register(mgr);
+ if (ret) {
+ dev_err(dev, "unable to register FPGA manager");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_fpga_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pcap-fpga", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match);
+
+static struct platform_driver zynqmp_fpga_driver = {
+ .probe = zynqmp_fpga_probe,
+ .remove = zynqmp_fpga_remove,
+ .driver = {
+ .name = "zynqmp_fpga_manager",
+ .of_match_table = of_match_ptr(zynqmp_fpga_of_match),
+ },
+};
+
+module_platform_driver(zynqmp_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx ZynqMp FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0f91600c27ae..8023d03ec362 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -26,12 +26,12 @@ config GPIOLIB_FASTPATH_LIMIT
range 32 512
default 512
help
- This adjusts the point at which certain APIs will switch from
- using a stack allocated buffer to a dynamically allocated buffer.
+ This adjusts the point at which certain APIs will switch from
+ using a stack allocated buffer to a dynamically allocated buffer.
- You shouldn't need to change this unless you really need to
- optimize either stack space or performance. Change this carefully
- since setting an incorrect value could cause stack corruption.
+ You shouldn't need to change this unless you really need to
+ optimize either stack space or performance. Change this carefully
+ since setting an incorrect value could cause stack corruption.
config OF_GPIO
def_bool y
@@ -286,6 +286,19 @@ config GPIO_IOP
If unsure, say N.
+config GPIO_IXP4XX
+ bool "Intel IXP4xx GPIO"
+ depends on ARM # For <asm/mach-types.h>
+ depends on ARCH_IXP4XX
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support the GPIO functionality of the Intel
+ IXP4xx series of chips.
+
+ If unsure, say N.
+
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
depends on CPU_LOONGSON2 || CPU_LOONGSON3
@@ -319,7 +332,7 @@ config GPIO_MENZ127
depends on MCB
select GPIO_GENERIC
help
- Say yes here to support the MEN 16Z127 GPIO Controller
+ Say yes here to support the MEN 16Z127 GPIO Controller
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
@@ -329,20 +342,6 @@ config GPIO_MM_LANTIQ
(EBU) found on Lantiq SoCs. The gpios are output only as they are
created by attaching a 16bit latch to the bus.
-config GPIO_MOCKUP
- tristate "GPIO Testing Driver"
- depends on GPIOLIB && SYSFS
- select GPIO_SYSFS
- select GPIOLIB_IRQCHIP
- select IRQ_SIM
- help
- This enables GPIO Testing driver, which provides a way to test GPIO
- subsystem through sysfs(or char device) and debugfs. GPIO_SYSFS
- must be selected for this test.
- User could use it through the script in
- tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
- it.
-
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -861,11 +860,11 @@ config GPIO_MAX732X
Input and Output (designed by 'P'). The combinations are listed
below:
- 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
- max7322 (4I4O), max7323 (4P4O)
+ 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
+ max7322 (4I4O), max7323 (4P4O)
- 16 bits: max7324 (8I8O), max7325 (8P8O),
- max7326 (4I12O), max7327 (4P12O)
+ 16 bits: max7324 (8I8O), max7325 (8P8O),
+ max7326 (4I12O), max7327 (4P12O)
Board setup code must specify the model to use, and the start
number for these GPIOs.
@@ -892,17 +891,17 @@ config GPIO_PCA953X
SMBus I/O expanders, made mostly by NXP or TI. Compatible
models include:
- 4 bits: pca9536, pca9537
+ 4 bits: pca9536, pca9537
- 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
- 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
+ tca6416
- 24 bits: tca6424
+ 24 bits: tca6424
- 40 bits: pca9505, pca9698
+ 40 bits: pca9505, pca9698
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -924,7 +923,7 @@ config GPIO_PCF857X
8 bits: pcf8574, pcf8574a, pca8574, pca8574a,
pca9670, pca9672, pca9674, pca9674a,
- max7328, max7329
+ max7328, max7329
16 bits: pcf8575, pcf8575c, pca8575,
pca9671, pca9673, pca9675
@@ -1046,9 +1045,9 @@ config HTC_EGPIO
bool "HTC EGPIO support"
depends on GPIOLIB && ARM
help
- This driver supports the CPLD egpio chip present on
- several HTC phones. It provides basic support for input
- pins, output pins, and irqs.
+ This driver supports the CPLD egpio chip present on
+ several HTC phones. It provides basic support for input
+ pins, output pins, and irqs.
config GPIO_JANZ_TTL
tristate "Janz VMOD-TTL Digital IO Module"
@@ -1084,7 +1083,7 @@ config GPIO_LP873X
on LP873X PMICs.
This driver can also be built as a module. If so, the module will be
- called gpio-lp873x.
+ called gpio-lp873x.
config GPIO_LP87565
tristate "TI LP87565 GPIO"
@@ -1111,6 +1110,13 @@ config GPIO_MAX77620
driver also provides interrupt support for each of the gpios.
Say yes here to enable the max77620 to be used as gpio controller.
+config GPIO_MAX77650
+ tristate "Maxim MAX77650/77651 GPIO support"
+ depends on MFD_MAX77650
+ help
+ GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
+ These chips have a single pin that can be configured as GPIO.
+
config GPIO_MSIC
bool "Intel MSIC mixed signal gpio support"
depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
@@ -1315,6 +1321,13 @@ config GPIO_MERRIFIELD
help
Say Y here to support Intel Merrifield GPIO.
+config GPIO_MLXBF
+ tristate "Mellanox BlueField SoC GPIO"
+ depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
+ select GPIO_GENERIC
+ help
+ Say Y here if you want GPIO support on the Mellanox BlueField SoC.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on X86 || COMPILE_TEST
@@ -1435,10 +1448,22 @@ config GPIO_VIPERBOARD
Say yes here to access the GPIO signals of Nano River
Technologies Viperboard. There are two GPIO chips on the
board: gpioa and gpiob.
- See viperboard API specification and Nano
- River Tech's viperboard.h for detailed meaning
- of the module parameters.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
endmenu
+config GPIO_MOCKUP
+ tristate "GPIO Testing Driver"
+ depends on GPIOLIB
+ select IRQ_SIM
+ help
+ This enables the GPIO Testing driver, which provides a way to test the
+ GPIO subsystem through sysfs (or a character device) and debugfs.
+ GPIO_SYSFS must be selected for this test.
+ It can be used through the script in
+ tools/testing/selftests/gpio/gpio-mockup.sh, which also shows the
+ intended usage.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 54d55274b93a..6700eee860b7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
+obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
@@ -80,11 +81,13 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
+obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
+obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index fb7b620763a2..e81307f9754e 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -1,21 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* 74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver
*
* Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/spi/spi.h>
-#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
#define GEN_74X164_NUMBER_GPIOS 8
@@ -116,10 +113,9 @@ static int gen_74x164_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- if (of_property_read_u32(spi->dev.of_node, "registers-number",
- &nregs)) {
- dev_err(&spi->dev,
- "Missing registers-number property in the DT.\n");
+ ret = device_property_read_u32(&spi->dev, "registers-number", &nregs);
+ if (ret) {
+ dev_err(&spi->dev, "Missing 'registers-number' property.\n");
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 49616ec815ee..04247075091d 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -106,7 +106,6 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
struct mmio_74xx_gpio_priv *priv;
- struct resource *res;
void __iomem *dat;
int err;
@@ -116,8 +115,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dat = devm_ioremap_resource(&pdev->dev, res);
+ dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
return PTR_ERR(dat);
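
This and the conversions that follow in the GPIO patches all use the same helper, which folds the platform_get_resource()/devm_ioremap_resource() pair into a single call. Conceptually it behaves like the sketch below (a paraphrase, not the helper's literal body):

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_ioremap(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res;

	/* Equivalent to devm_platform_ioremap_resource(pdev, index) */
	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
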
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 9b78dc837603..1ffd7c2d1285 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -78,7 +78,6 @@ static int pt_gpio_probe(struct platform_device *pdev)
struct acpi_device *acpi_dev;
acpi_handle handle = ACPI_HANDLE(dev);
struct pt_gpio_chip *pt_gpio;
- struct resource *res_mem;
int ret = 0;
if (acpi_bus_get_device(handle, &acpi_dev)) {
@@ -90,12 +89,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
if (!pt_gpio)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&pdev->dev, "Failed to get MMIO resource for PT GPIO.\n");
- return -EINVAL;
- }
- pt_gpio->reg_base = devm_ioremap_resource(dev, res_mem);
+ pt_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pt_gpio->reg_base)) {
dev_err(&pdev->dev, "Failed to map MMIO resource for PT GPIO.\n");
return PTR_ERR(pt_gpio->reg_base);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 217507002dbc..0f1b55c7c361 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1156,15 +1156,13 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
- struct resource *res;
int rc, i, banks;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index c5536a509b59..9fa6d3a967d2 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -568,7 +568,6 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
- struct resource *res;
struct bcm_kona_gpio_bank *bank;
struct bcm_kona_gpio *kona_gpio;
struct gpio_chip *chip;
@@ -618,8 +617,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
return -ENXIO;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- kona_gpio->reg_base = devm_ioremap_resource(dev, res);
+ kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_gpio->reg_base)) {
ret = -ENXIO;
goto err_irq_domain;
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index aec8d5df9f30..712ae212b0b4 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -148,7 +148,6 @@ static struct irq_chip cdns_gpio_irqchip = {
static int cdns_gpio_probe(struct platform_device *pdev)
{
struct cdns_gpio_chip *cgpio;
- struct resource *res;
int ret, irq;
u32 dir_prev;
u32 num_gpios = 32;
@@ -157,8 +156,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
if (!cgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cgpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ cgpio->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cgpio->regs))
return PTR_ERR(cgpio->regs);
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 52fd63f02134..0fbbb0edc0ba 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -19,7 +19,6 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
void __iomem *dat, *dir;
struct gpio_chip *gc;
- struct resource *res;
int err, id;
if (!np)
@@ -33,13 +32,11 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dat = devm_ioremap_resource(&pdev->dev, res);
+ dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
return PTR_ERR(dat);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dir = devm_ioremap_resource(&pdev->dev, res);
+ dir = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dir))
return PTR_ERR(dir);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 84ae04402f70..d3eda65fd6d3 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -655,7 +655,6 @@ MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
static int dwapb_gpio_probe(struct platform_device *pdev)
{
unsigned int i;
- struct resource *res;
struct dwapb_gpio *gpio;
int err;
struct device *dev = &pdev->dev;
@@ -688,8 +687,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (!gpio->ports)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ gpio->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 45fe125823a8..8ff8ce2970d9 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -225,7 +225,6 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ftgpio_gpio *g;
int irq;
int ret;
@@ -236,8 +235,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
g->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g->base = devm_ioremap_resource(dev, res);
+ g->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g->base))
return PTR_ERR(g->base);
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index a7b17897356e..e5fa00f8145f 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -208,7 +208,6 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
static int hlwd_gpio_probe(struct platform_device *pdev)
{
struct hlwd_gpio *hlwd;
- struct resource *regs_resource;
u32 ngpios;
int res;
@@ -216,8 +215,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
if (!hlwd)
return -ENOMEM;
- regs_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hlwd->regs = devm_ioremap_resource(&pdev->dev, regs_resource);
+ hlwd->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hlwd->regs))
return PTR_ERR(hlwd->regs);
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 8d62db447ec1..11b77d868c89 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -21,7 +21,6 @@
static int iop3xx_gpio_probe(struct platform_device *pdev)
{
- struct resource *res;
struct gpio_chip *gc;
void __iomem *base;
int err;
@@ -30,8 +29,7 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
new file mode 100644
index 000000000000..4b1cf7ea858d
--- /dev/null
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// IXP4 GPIO driver
+// Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+//
+// based on previous work and know-how from:
+// Deepak Saxena <dsaxena@plexity.net>
+
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+/* Includes that will go away with the DT transition */
+#include <linux/irqchip/irq-ixp4xx.h>
+
+#include <asm/mach-types.h>
+
+#define IXP4XX_REG_GPOUT 0x00
+#define IXP4XX_REG_GPOE 0x04
+#define IXP4XX_REG_GPIN 0x08
+#define IXP4XX_REG_GPIS 0x0C
+#define IXP4XX_REG_GPIT1 0x10
+#define IXP4XX_REG_GPIT2 0x14
+#define IXP4XX_REG_GPCLK 0x18
+#define IXP4XX_REG_GPDBSEL 0x1C
+
+/*
+ * The hardware uses 3 bits to indicate interrupt "style".
+ * We clear and set these three bits accordingly. The lower 24
+ * bits in two registers (GPIT1 and GPIT2) are used to set up
+ * the style for 8 lines each for a total of 16 GPIO lines.
+ */
+#define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0
+#define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1
+#define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2
+#define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3
+#define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4
+#define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0)
+#define IXP4XX_GPIO_STYLE_SIZE 3
+
+/**
+ * struct ixp4xx_gpio - IXP4 GPIO state container
+ * @dev: containing device for this instance
+ * @fwnode: the fwnode for this GPIO chip
+ * @gc: gpiochip for this instance
+ * @domain: irqdomain for this chip instance
+ * @base: remapped I/O-memory base
+ * @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
+ * 0: level triggered
+ */
+struct ixp4xx_gpio {
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct gpio_chip gc;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long long irq_edge;
+};
+
+/**
+ * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
+ * @gpio_offset: offset of the IXP4 GPIO line
+ * @parent_hwirq: hwirq on the parent IRQ controller
+ */
+struct ixp4xx_gpio_map {
+ int gpio_offset;
+ int parent_hwirq;
+};
+
+/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
+const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
+ { .gpio_offset = 0, .parent_hwirq = 6 },
+ { .gpio_offset = 1, .parent_hwirq = 7 },
+ { .gpio_offset = 2, .parent_hwirq = 19 },
+ { .gpio_offset = 3, .parent_hwirq = 20 },
+ { .gpio_offset = 4, .parent_hwirq = 21 },
+ { .gpio_offset = 5, .parent_hwirq = 22 },
+ { .gpio_offset = 6, .parent_hwirq = 23 },
+ { .gpio_offset = 7, .parent_hwirq = 24 },
+ { .gpio_offset = 8, .parent_hwirq = 25 },
+ { .gpio_offset = 9, .parent_hwirq = 26 },
+ { .gpio_offset = 10, .parent_hwirq = 27 },
+ { .gpio_offset = 11, .parent_hwirq = 28 },
+ { .gpio_offset = 12, .parent_hwirq = 29 },
+};
+
+static void ixp4xx_gpio_irq_ack(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
+}
+
+static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ /* ACK when unmasking if not edge-triggered */
+ if (!(g->irq_edge & BIT(d->hwirq)))
+ ixp4xx_gpio_irq_ack(d);
+
+ irq_chip_unmask_parent(d);
+}
+
+static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ int line = d->hwirq;
+ unsigned long flags;
+ u32 int_style;
+ u32 int_reg;
+ u32 val;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (line >= 8) {
+ /* pins 8-15 */
+ line -= 8;
+ int_reg = IXP4XX_REG_GPIT2;
+ } else {
+ /* pins 0-7 */
+ int_reg = IXP4XX_REG_GPIT1;
+ }
+
+ spin_lock_irqsave(&g->gc.bgpio_lock, flags);
+
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+
+ spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+
+ /* This parent only accepts level high (asserted) */
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip ixp4xx_gpio_irqchip = {
+ .name = "IXP4GPIO",
+ .irq_ack = ixp4xx_gpio_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = ixp4xx_gpio_irq_unmask,
+ .irq_set_type = ixp4xx_gpio_irq_set_type,
+};
+
+static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ /* This goes away when we transition to DT */
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_gpio *g = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
+ irq, irq + nr_irqs - 1,
+ hwirq, hwirq + nr_irqs - 1);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_fwspec parent_fwspec;
+ const struct ixp4xx_gpio_map *map;
+ int j;
+
+ /* Not all lines support IRQs */
+ for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
+ map = &ixp4xx_gpiomap[j];
+ if (map->gpio_offset == hwirq)
+ break;
+ }
+ if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
+ dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
+ return -EINVAL;
+ }
+ dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
+
+ /*
+ * We set handle_bad_irq because the .set_type() should
+ * always be invoked and set the right type of handler.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixp4xx_gpio_irqchip,
+ g,
+ handle_bad_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+
+ /*
+ * Create an IRQ fwspec to send up to the parent irqdomain:
+ * specify the hwirq we address on the parent and tie it
+ * all together up the chain.
+ */
+ parent_fwspec.fwnode = d->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = map->parent_hwirq;
+ /* This parent only handles asserted level IRQs */
+ parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq + i, map->parent_hwirq);
+ ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
+ &parent_fwspec);
+ if (ret)
+ dev_err(g->dev,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ map->parent_hwirq, hwirq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
+ .translate = ixp4xx_gpio_irq_domain_translate,
+ .alloc = ixp4xx_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int ixp4xx_gpio_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent;
+ struct resource *res;
+ struct ixp4xx_gpio *g;
+ int ret;
+ int i;
+
+ g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+ g->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g->base)) {
+ dev_err(dev, "ioremap error\n");
+ return PTR_ERR(g->base);
+ }
+
+ /*
+ * Make sure GPIO 14 and 15 are NOT used as clocks but as GPIOs on
+ * specific machines.
+ */
+ if (machine_is_dsmg600() || machine_is_nas100d())
+ __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK);
+
+ /*
+ * This is a very special big-endian ARM issue: when the IXP4xx is
+ * run in big endian mode, all registers in the machine are switched
+ * around to the CPU-native endianness. This is why the driver mostly
+ * uses __raw_readl()/__raw_writel() to access the registers
+ * in the appropriate order. With the GPIO library we need to specify
+ * byte order explicitly, so this flag needs to be set when compiling
+ * for big endian.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+ flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+#else
+ flags = 0;
+#endif
+
+ /* Populate and register gpio chip */
+ ret = bgpio_init(&g->gc, dev, 4,
+ g->base + IXP4XX_REG_GPIN,
+ g->base + IXP4XX_REG_GPOUT,
+ NULL,
+ NULL,
+ g->base + IXP4XX_REG_GPOE,
+ flags);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ g->gc.to_irq = ixp4xx_gpio_to_irq;
+ g->gc.ngpio = 16;
+ g->gc.label = "IXP4XX_GPIO_CHIP";
+ /*
+ * TODO: when we have migrated to device tree and all GPIOs
+ * are fetched using phandles, set this to -1 to get rid of
+ * the fixed gpiochip base.
+ */
+ g->gc.base = 0;
+ g->gc.parent = &pdev->dev;
+ g->gc.owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret) {
+ dev_err(dev, "failed to add SoC gpiochip\n");
+ return ret;
+ }
+
+ /*
+ * When we convert to device tree we will simply look up the
+ * parent irqdomain using irq_find_host(parent) as parent comes
+ * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+ * the fwnode. For now we need this boardfile style code.
+ */
+ if (np) {
+ struct device_node *irq_parent;
+
+ irq_parent = of_irq_find_parent(np);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+ g->fwnode = of_node_to_fwnode(np);
+ } else {
+ parent = ixp4xx_get_irq_domain();
+ g->fwnode = irq_domain_alloc_fwnode(g->base);
+ if (!g->fwnode) {
+ dev_err(dev, "no domain base\n");
+ return -ENODEV;
+ }
+ }
+ g->domain = irq_domain_create_hierarchy(parent,
+ IRQ_DOMAIN_FLAG_HIERARCHY,
+ ARRAY_SIZE(ixp4xx_gpiomap),
+ g->fwnode,
+ &ixp4xx_gpio_irqdomain_ops,
+ g);
+ if (!g->domain) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev, "no hierarchical irq domain\n");
+ return ret;
+ }
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ if (!np) {
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
+ const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ /* This is the hwirq for the GPIO line side of things */
+ fwspec.param[0] = map->gpio_offset;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(g->domain,
+ -1, /* just pick something */
+ 1,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ map->gpio_offset, map->parent_hwirq,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, g);
+ dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_gpio_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-gpio",
+ },
+ {},
+};
+
+
+static struct platform_driver ixp4xx_gpio_driver = {
+ .driver = {
+ .name = "ixp4xx-gpio",
+ .of_match_table = of_match_ptr(ixp4xx_gpio_of_match),
+ },
+ .probe = ixp4xx_gpio_probe,
+};
+builtin_platform_driver(ixp4xx_gpio_driver);
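
Because the chip registers a hierarchical irqdomain on top of the IXP4xx interrupt controller, consumers only deal with the GPIO line number; the mapping to the fixed parent hwirqs happens in the .alloc callback above. A hypothetical board-level consumer (line number, handler and name are invented), valid until the DT conversion mentioned in the comments:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static int example_request_gpio_irq(irq_handler_t handler, void *data)
{
	int irq = gpio_to_irq(5);	/* resolved via ixp4xx_gpio_to_irq() */

	if (irq < 0)
		return irq;

	/* .irq_set_type() programs GPIT1/GPIT2 and forces the line to input */
	return request_irq(irq, handler, IRQF_TRIGGER_FALLING,
			   "example-ixp4xx-gpio", data);
}
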
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 6b9bf8b7bf16..b97a91166497 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -147,7 +147,6 @@ static int ttl_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ttl_module *mod;
struct gpio_chip *gpio;
- struct resource *res;
int ret;
pdata = dev_get_platdata(&pdev->dev);
@@ -164,8 +163,7 @@ static int ttl_probe(struct platform_device *pdev)
spin_lock_init(&mod->lock);
/* get access to the MODULbus registers for this module */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mod->regs = devm_ioremap_resource(dev, res);
+ mod->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mod->regs))
return PTR_ERR(mod->regs);
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index fca84ccac35c..1b1ee94eeab4 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -47,15 +47,13 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
- struct resource *res;
int ret;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio_reg_base = devm_ioremap_resource(dev, res);
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index d441dbaed7a3..d711ae06747e 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -340,10 +340,7 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev)
index = of_property_match_string(dev->of_node, "reg-names", "gpio");
if (index < 0) {
/* To support backward compatibility take the first resource */
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gc->base = devm_ioremap_resource(dev, res);
+ gc->base = devm_platform_ioremap_resource(pdev, 0);
} else {
struct resource res;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
new file mode 100644
index 000000000000..3f03f4e8956c
--- /dev/null
+++ b/drivers/gpio/gpio-max77650.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// GPIO driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_GPIO_DIR_MASK BIT(0)
+#define MAX77650_GPIO_INVAL_MASK BIT(1)
+#define MAX77650_GPIO_DRV_MASK BIT(2)
+#define MAX77650_GPIO_OUTVAL_MASK BIT(3)
+#define MAX77650_GPIO_DEBOUNCE_MASK BIT(4)
+
+#define MAX77650_GPIO_DIR_OUT 0x00
+#define MAX77650_GPIO_DIR_IN BIT(0)
+#define MAX77650_GPIO_OUT_LOW 0x00
+#define MAX77650_GPIO_OUT_HIGH BIT(3)
+#define MAX77650_GPIO_DRV_OPEN_DRAIN 0x00
+#define MAX77650_GPIO_DRV_PUSH_PULL BIT(2)
+#define MAX77650_GPIO_DEBOUNCE BIT(4)
+
+#define MAX77650_GPIO_DIR_BITS(_reg) \
+ ((_reg) & MAX77650_GPIO_DIR_MASK)
+#define MAX77650_GPIO_INVAL_BITS(_reg) \
+ (((_reg) & MAX77650_GPIO_INVAL_MASK) >> 1)
+
+struct max77650_gpio_chip {
+ struct regmap *map;
+ struct gpio_chip gc;
+ int irq;
+};
+
+static int max77650_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DIR_MASK,
+ MAX77650_GPIO_DIR_IN);
+}
+
+static int max77650_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ int mask, regval;
+
+ mask = MAX77650_GPIO_DIR_MASK | MAX77650_GPIO_OUTVAL_MASK;
+ regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
+ regval |= MAX77650_GPIO_DIR_OUT;
+
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO, mask, regval);
+}
+
+static void max77650_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ int rv, regval;
+
+ regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
+
+ rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_OUTVAL_MASK, regval);
+ if (rv)
+ dev_err(gc->parent, "cannot set GPIO value: %d\n", rv);
+}
+
+static int max77650_gpio_get_value(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ unsigned int val;
+ int rv;
+
+ rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
+ if (rv)
+ return rv;
+
+ return MAX77650_GPIO_INVAL_BITS(val);
+}
+
+static int max77650_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ unsigned int val;
+ int rv;
+
+ rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
+ if (rv)
+ return rv;
+
+ return MAX77650_GPIO_DIR_BITS(val);
+}
+
+static int max77650_gpio_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long cfg)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(cfg)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DRV_MASK,
+ MAX77650_GPIO_DRV_OPEN_DRAIN);
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DRV_MASK,
+ MAX77650_GPIO_DRV_PUSH_PULL);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DEBOUNCE_MASK,
+ MAX77650_GPIO_DEBOUNCE);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int max77650_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ return chip->irq;
+}
+
+static int max77650_gpio_probe(struct platform_device *pdev)
+{
+ struct max77650_gpio_chip *chip;
+ struct device *dev, *parent;
+ struct i2c_client *i2c;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+ i2c = to_i2c_client(parent);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(parent, NULL);
+ if (!chip->map)
+ return -ENODEV;
+
+ chip->irq = platform_get_irq_byname(pdev, "GPI");
+ if (chip->irq < 0)
+ return chip->irq;
+
+ chip->gc.base = -1;
+ chip->gc.ngpio = 1;
+ chip->gc.label = i2c->name;
+ chip->gc.parent = dev;
+ chip->gc.owner = THIS_MODULE;
+ chip->gc.can_sleep = true;
+
+ chip->gc.direction_input = max77650_gpio_direction_input;
+ chip->gc.direction_output = max77650_gpio_direction_output;
+ chip->gc.set = max77650_gpio_set_value;
+ chip->gc.get = max77650_gpio_get_value;
+ chip->gc.get_direction = max77650_gpio_get_direction;
+ chip->gc.set_config = max77650_gpio_set_config;
+ chip->gc.to_irq = max77650_gpio_to_irq;
+
+ return devm_gpiochip_add_data(dev, &chip->gc, chip);
+}
+
+static struct platform_driver max77650_gpio_driver = {
+ .driver = {
+ .name = "max77650-gpio",
+ },
+ .probe = max77650_gpio_probe,
+};
+module_platform_driver(max77650_gpio_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 GPIO driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 3134c0d2bfe4..9308081e0a4a 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -146,7 +146,6 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
static int mb86s70_gpio_probe(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip;
- struct resource *res;
int ret;
gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL);
@@ -155,8 +154,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gchip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gchip->base = devm_ioremap_resource(&pdev->dev, res);
+ gchip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
new file mode 100644
index 000000000000..894aaf55fc96
--- /dev/null
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+/* Number of pins on BlueField */
+#define MLXBF_GPIO_NR 54
+
+/* Pad Electrical Controls. */
+#define MLXBF_GPIO_PAD_CONTROL_FIRST_WORD 0x0700
+#define MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD 0x0708
+#define MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD 0x0710
+#define MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD 0x0718
+
+#define MLXBF_GPIO_PIN_DIR_I 0x1040
+#define MLXBF_GPIO_PIN_DIR_O 0x1048
+#define MLXBF_GPIO_PIN_STATE 0x1000
+#define MLXBF_GPIO_SCRATCHPAD 0x20
+
+#ifdef CONFIG_PM
+struct mlxbf_gpio_context_save_regs {
+ u64 scratchpad;
+ u64 pad_control[MLXBF_GPIO_NR];
+ u64 pin_dir_i;
+ u64 pin_dir_o;
+};
+#endif
+
+/* Device state structure. */
+struct mlxbf_gpio_state {
+ struct gpio_chip gc;
+
+ /* Memory Address */
+ void __iomem *base;
+
+#ifdef CONFIG_PM
+ struct mlxbf_gpio_context_save_regs csave_regs;
+#endif
+};
+
+static int mlxbf_gpio_probe(struct platform_device *pdev)
+{
+ struct mlxbf_gpio_state *gs;
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ int ret;
+
+ gs = devm_kzalloc(&pdev->dev, sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return -ENOMEM;
+
+ gs->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gs->base))
+ return PTR_ERR(gs->base);
+
+ gc = &gs->gc;
+ ret = bgpio_init(gc, dev, 8,
+ gs->base + MLXBF_GPIO_PIN_STATE,
+ NULL,
+ NULL,
+ gs->base + MLXBF_GPIO_PIN_DIR_O,
+ gs->base + MLXBF_GPIO_PIN_DIR_I,
+ 0);
+ if (ret)
+ return -ENODEV;
+
+ gc->owner = THIS_MODULE;
+ gc->ngpio = MLXBF_GPIO_NR;
+
+ ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gs);
+ dev_info(&pdev->dev, "registered Mellanox BlueField GPIO");
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mlxbf_gpio_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev);
+
+ gs->csave_regs.scratchpad = readq(gs->base + MLXBF_GPIO_SCRATCHPAD);
+ gs->csave_regs.pad_control[0] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD);
+ gs->csave_regs.pad_control[1] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD);
+ gs->csave_regs.pad_control[2] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD);
+ gs->csave_regs.pad_control[3] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD);
+ gs->csave_regs.pin_dir_i = readq(gs->base + MLXBF_GPIO_PIN_DIR_I);
+ gs->csave_regs.pin_dir_o = readq(gs->base + MLXBF_GPIO_PIN_DIR_O);
+
+ return 0;
+}
+
+static int mlxbf_gpio_resume(struct platform_device *pdev)
+{
+ struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev);
+
+ writeq(gs->csave_regs.scratchpad, gs->base + MLXBF_GPIO_SCRATCHPAD);
+ writeq(gs->csave_regs.pad_control[0],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[1],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[2],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[3],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD);
+ writeq(gs->csave_regs.pin_dir_i, gs->base + MLXBF_GPIO_PIN_DIR_I);
+ writeq(gs->csave_regs.pin_dir_o, gs->base + MLXBF_GPIO_PIN_DIR_O);
+
+ return 0;
+}
+#endif
+
+static const struct acpi_device_id mlxbf_gpio_acpi_match[] = {
+ { "MLNXBF02", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_gpio_acpi_match);
+
+static struct platform_driver mlxbf_gpio_driver = {
+ .driver = {
+ .name = "mlxbf_gpio",
+ .acpi_match_table = ACPI_PTR(mlxbf_gpio_acpi_match),
+ },
+ .probe = mlxbf_gpio_probe,
+#ifdef CONFIG_PM
+ .suspend = mlxbf_gpio_suspend,
+ .resume = mlxbf_gpio_resume,
+#endif
+};
+
+module_platform_driver(mlxbf_gpio_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField GPIO Driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 50bdc29591c0..6f904c874678 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -134,17 +134,6 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
unsigned long pinmask = bgpio_line2mask(gc, gpio);
bool dir = !!(gc->bgpio_dir & pinmask);
- /*
- * If the direction is OUT we read the value from the SET
- * register, and if the direction is IN we read the value
- * from the DAT register.
- *
- * If the direction bits are inverted, naturally this gets
- * inverted too.
- */
- if (gc->bgpio_dir_inverted)
- dir = !dir;
-
if (dir)
return !!(gc->read_reg(gc->reg_set) & pinmask);
else
@@ -164,14 +153,8 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- /* Exploit the fact that we know which directions are set */
- if (gc->bgpio_dir_inverted) {
- set_mask = *mask & ~gc->bgpio_dir;
- get_mask = *mask & gc->bgpio_dir;
- } else {
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
- }
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -372,11 +355,12 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- if (gc->bgpio_dir_inverted)
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- else
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+
+ if (gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (gc->reg_dir_out)
+ gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -385,11 +369,16 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
- /* Return 0 if output, 1 of input */
- if (gc->bgpio_dir_inverted)
- return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
- else
- return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ /* Return 0 if output, 1 if input */
+ if (gc->bgpio_dir_unreadable)
+ return !(gc->bgpio_dir & bgpio_line2mask(gc, gpio));
+ if (gc->reg_dir_out)
+ return !(gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio));
+ if (gc->reg_dir_in)
+ return !!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio));
+
+ /* This should not happen */
+ return 1;
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -400,11 +389,12 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- if (gc->bgpio_dir_inverted)
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
- else
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+
+ if (gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (gc->reg_dir_out)
+ gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -537,19 +527,12 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
void __iomem *dirin,
unsigned long flags)
{
- if (dirout && dirin) {
- return -EINVAL;
- } else if (dirout) {
- gc->reg_dir = dirout;
- gc->direction_output = bgpio_dir_out;
- gc->direction_input = bgpio_dir_in;
- gc->get_direction = bgpio_get_dir;
- } else if (dirin) {
- gc->reg_dir = dirin;
+ if (dirout || dirin) {
+ gc->reg_dir_out = dirout;
+ gc->reg_dir_in = dirin;
gc->direction_output = bgpio_dir_out;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
- gc->bgpio_dir_inverted = true;
} else {
if (flags & BGPIOF_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
@@ -588,11 +571,11 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
* @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
* that setting a line to 1 in this register will turn that line into an
* output line. Conversely, setting the line to 0 will turn that line into
- * an input. Either this or @dirin can be defined, but never both.
+ * an input.
* @dirin: MMIO address for the register to set this line as INPUT. It is assumed
* that setting a line to 1 in this register will turn that line into an
* input line. Conversely, setting the line to 0 will turn that line into
- * an output. Either this or @dirout can be defined, but never both.
+ * an output.
* @flags: Different flags that will affect the behaviour of the device, such as
* endianness etc.
*/
@@ -634,8 +617,28 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
gc->bgpio_data = gc->read_reg(gc->reg_set);
- if (gc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
- gc->bgpio_dir = gc->read_reg(gc->reg_dir);
+
+ if (flags & BGPIOF_UNREADABLE_REG_DIR)
+ gc->bgpio_dir_unreadable = true;
+
+ /*
+ * Inspect hardware to find initial direction setting.
+ */
+ if ((gc->reg_dir_out || gc->reg_dir_in) &&
+ !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
+ if (gc->reg_dir_out)
+ gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
+ else if (gc->reg_dir_in)
+ gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ /*
+	 * If we have two direction registers, synchronise
+	 * the input setting to the output setting; the library
+	 * cannot handle a line being input and output at the
+	 * same time.
+ */
+ if (gc->reg_dir_out && gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ }
return ret;
}
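
The gpio-mmio rework above allows a chip to provide both a direction-out and a direction-in register instead of exactly one of the two. A minimal, self-contained sketch of a caller follows; the "example" device, its register offsets and the 4-byte register width are assumptions made purely for illustration, while bgpio_init(), devm_platform_ioremap_resource() and devm_gpiochip_add_data() are the real library calls as I understand their signatures.

/*
 * Sketch: an MMIO GPIO block with separate "direction out" and
 * "direction in" registers. All offsets below are hypothetical.
 */
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_mmio_gpio_probe(struct platform_device *pdev)
{
	struct gpio_chip *gc;
	void __iomem *base;
	int ret;

	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Pass both direction registers; with the change above the
	 * library keeps them in sync instead of returning -EINVAL.
	 */
	ret = bgpio_init(gc, &pdev->dev, 4,
			 base + 0x00,	/* data in */
			 base + 0x04,	/* data set */
			 base + 0x08,	/* data clear */
			 base + 0x0c,	/* direction: 1 = output */
			 base + 0x10,	/* direction: 1 = input */
			 0);
	if (ret)
		return ret;

	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
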
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 74401e0adb29..79654fb2e50f 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -293,7 +293,6 @@ mediatek_gpio_bank_probe(struct device *dev,
static int
mediatek_gpio_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk *mtk;
@@ -304,7 +303,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
if (!mtk)
return -ENOMEM;
- mtk->base = devm_ioremap_resource(dev, res);
+ mtk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mtk->base))
return PTR_ERR(mtk->base);
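
The gpio-mt7621 hunk above is the first of many identical conversions in this series. The shape of the change, shown as a standalone before/after sketch with a hypothetical foo_probe() rather than code from any of the drivers touched here:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Before: look the resource up by hand, then map it. */
static int foo_probe_old(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}

/* After: one helper does the lookup, request and mapping. */
static int foo_probe_new(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}

Both helpers return an ERR_PTR on failure, so the existing IS_ERR()/PTR_ERR() error handling carries over unchanged; explicit NULL checks on the resource, as removed in gpio-xlp further down, become unnecessary because devm_ioremap_resource() already rejects a missing resource.
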
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index f97ed32b8beb..059094ac44cb 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1038,11 +1038,9 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
static int mvebu_gpio_probe_raw(struct platform_device *pdev,
struct mvebu_gpio_chip *mvchip)
{
- struct resource *res;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1062,8 +1060,7 @@ static int mvebu_gpio_probe_raw(struct platform_device *pdev,
* per-CPU registers
*/
if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index e86e61dda4b7..b2813580c582 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -411,7 +411,6 @@ static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
- struct resource *iores;
int irq_base;
int err;
@@ -423,8 +422,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->base = devm_ioremap_resource(&pdev->dev, iores);
+ port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 1b19c88ea7bb..afb0e8a791e5 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -82,7 +82,6 @@ static int octeon_gpio_probe(struct platform_device *pdev)
{
struct octeon_gpio *gpio;
struct gpio_chip *chip;
- struct resource *res_mem;
void __iomem *reg_base;
int err = 0;
@@ -91,8 +90,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
chip = &gpio->chip;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 7f33024b6d83..16289bafa001 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -31,8 +31,6 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
-#define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER BIT(2)
-
struct gpio_regs {
u32 irqenable1;
u32 irqenable2;
@@ -48,13 +46,6 @@ struct gpio_regs {
u32 debounce_en;
};
-struct gpio_bank;
-
-struct gpio_omap_funcs {
- void (*idle_enable_level_quirk)(struct gpio_bank *bank);
- void (*idle_disable_level_quirk)(struct gpio_bank *bank);
-};
-
struct gpio_bank {
struct list_head node;
void __iomem *base;
@@ -62,7 +53,6 @@ struct gpio_bank {
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
- struct gpio_omap_funcs funcs;
u32 saved_datain;
u32 level_mask;
u32 toggle_mask;
@@ -83,8 +73,6 @@ struct gpio_bank {
int stride;
u32 width;
int context_loss_count;
- bool workaround_enabled;
- u32 quirks;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
void (*set_dataout_multiple)(struct gpio_bank *bank,
@@ -353,6 +341,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
}
}
+/*
+ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
+ * See the GPIO "Wake-Up Generation" section of the TRM for the list of GPIOs
+ * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
+ * none are capable of waking up the system from off mode.
+ */
+static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
+{
+ u32 no_wake = bank->non_wakeup_gpios;
+
+ if (no_wake)
+ return !!(~no_wake & gpio_mask);
+
+ return false;
+}
+
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
@@ -363,10 +367,16 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
trigger & IRQ_TYPE_LEVEL_LOW);
omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
trigger & IRQ_TYPE_LEVEL_HIGH);
+
+ /*
+ * We need the edge detection enabled to allow the GPIO block
+ * to be woken from idle state. Set the appropriate edge detection
+ * in addition to the level detection.
+ */
omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
+ trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
+ trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));
bank->context.leveldetect0 =
readl_relaxed(bank->base + bank->regs->leveldetect0);
@@ -384,13 +394,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
- if (!bank->regs->irqctrl) {
- /* On omap24xx proceed only when valid GPIO bit is set */
- if (bank->non_wakeup_gpios) {
- if (!(bank->non_wakeup_gpios & gpio_bit))
- goto exit;
- }
-
+ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
@@ -403,7 +407,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
-exit:
bank->level_mask =
readl_relaxed(bank->base + bank->regs->leveldetect0) |
readl_relaxed(bank->base + bank->regs->leveldetect1);
@@ -896,44 +899,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
-/*
- * Only edges can generate a wakeup event to the PRCM.
- *
- * Therefore, ensure any wake-up capable GPIOs have
- * edge-detection enabled before going idle to ensure a wakeup
- * to the PRCM is generated on a GPIO transition. (c.f. 34xx
- * NDA TRM 25.5.3.1)
- *
- * The normal values will be restored upon ->runtime_resume()
- * by writing back the values saved in bank->context.
- */
-static void __maybe_unused
-omap2_gpio_enable_level_quirk(struct gpio_bank *bank)
-{
- u32 wake_low, wake_hi;
-
- /* Enable additional edge detection for level gpios for idle */
- wake_low = bank->context.leveldetect0 & bank->context.wake_en;
- if (wake_low)
- writel_relaxed(wake_low | bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
-
- wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
- if (wake_hi)
- writel_relaxed(wake_hi | bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
-}
-
-static void __maybe_unused
-omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
-{
- /* Disable edge detection for level gpios after idle */
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
-}
-
/*---------------------------------------------------------------------*/
static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1251,203 +1216,70 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return ret;
}
-static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context);
-static void omap_gpio_unidle(struct gpio_bank *bank);
-
-static int gpio_omap_cpu_notifier(struct notifier_block *nb,
- unsigned long cmd, void *v)
+static void omap_gpio_init_context(struct gpio_bank *p)
{
- struct gpio_bank *bank;
- unsigned long flags;
+ struct omap_gpio_reg_offs *regs = p->regs;
+ void __iomem *base = p->base;
- bank = container_of(nb, struct gpio_bank, nb);
+ p->context.ctrl = readl_relaxed(base + regs->ctrl);
+ p->context.oe = readl_relaxed(base + regs->direction);
+ p->context.wake_en = readl_relaxed(base + regs->wkup_en);
+ p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
+ p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
+ p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
+ p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
+ p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
+ p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
- raw_spin_lock_irqsave(&bank->lock, flags);
- switch (cmd) {
- case CPU_CLUSTER_PM_ENTER:
- if (bank->is_suspended)
- break;
- omap_gpio_idle(bank, true);
- break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
- case CPU_CLUSTER_PM_EXIT:
- if (bank->is_suspended)
- break;
- omap_gpio_unidle(bank);
- break;
- }
- raw_spin_unlock_irqrestore(&bank->lock, flags);
+ if (regs->set_dataout && p->regs->clr_dataout)
+ p->context.dataout = readl_relaxed(base + regs->set_dataout);
+ else
+ p->context.dataout = readl_relaxed(base + regs->dataout);
- return NOTIFY_OK;
+ p->context_valid = true;
}
-static const struct of_device_id omap_gpio_match[];
-
-static int omap_gpio_probe(struct platform_device *pdev)
+static void omap_gpio_restore_context(struct gpio_bank *bank)
{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- const struct of_device_id *match;
- const struct omap_gpio_platform_data *pdata;
- struct resource *res;
- struct gpio_bank *bank;
- struct irq_chip *irqc;
- int ret;
-
- match = of_match_device(of_match_ptr(omap_gpio_match), dev);
-
- pdata = match ? match->data : dev_get_platdata(dev);
- if (!pdata)
- return -EINVAL;
-
- bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
- if (!bank)
- return -ENOMEM;
-
- irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
- return -ENOMEM;
-
- irqc->irq_startup = omap_gpio_irq_startup,
- irqc->irq_shutdown = omap_gpio_irq_shutdown,
- irqc->irq_ack = omap_gpio_ack_irq,
- irqc->irq_mask = omap_gpio_mask_irq,
- irqc->irq_unmask = omap_gpio_unmask_irq,
- irqc->irq_set_type = omap_gpio_irq_type,
- irqc->irq_set_wake = omap_gpio_wake_enable,
- irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
- irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
- irqc->name = dev_name(&pdev->dev);
- irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqc->parent_device = dev;
-
- bank->irq = platform_get_irq(pdev, 0);
- if (bank->irq <= 0) {
- if (!bank->irq)
- bank->irq = -ENXIO;
- if (bank->irq != -EPROBE_DEFER)
- dev_err(dev,
- "can't get irq resource ret=%d\n", bank->irq);
- return bank->irq;
- }
-
- bank->chip.parent = dev;
- bank->chip.owner = THIS_MODULE;
- bank->dbck_flag = pdata->dbck_flag;
- bank->quirks = pdata->quirks;
- bank->stride = pdata->bank_stride;
- bank->width = pdata->bank_width;
- bank->is_mpuio = pdata->is_mpuio;
- bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
- bank->regs = pdata->regs;
-#ifdef CONFIG_OF_GPIO
- bank->chip.of_node = of_node_get(node);
-#endif
-
- if (node) {
- if (!of_property_read_bool(node, "ti,gpio-always-on"))
- bank->loses_context = true;
- } else {
- bank->loses_context = pdata->loses_context;
-
- if (bank->loses_context)
- bank->get_context_loss_count =
- pdata->get_context_loss_count;
- }
-
- if (bank->regs->set_dataout && bank->regs->clr_dataout) {
- bank->set_dataout = omap_set_gpio_dataout_reg;
- bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
- } else {
- bank->set_dataout = omap_set_gpio_dataout_mask;
- bank->set_dataout_multiple =
- omap_set_gpio_dataout_mask_multiple;
- }
-
- if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
- bank->funcs.idle_enable_level_quirk =
- omap2_gpio_enable_level_quirk;
- bank->funcs.idle_disable_level_quirk =
- omap2_gpio_disable_level_quirk;
- }
-
- raw_spin_lock_init(&bank->lock);
- raw_spin_lock_init(&bank->wa_lock);
-
- /* Static mapping, never released */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(bank->base)) {
- return PTR_ERR(bank->base);
- }
-
- if (bank->dbck_flag) {
- bank->dbck = devm_clk_get(dev, "dbclk");
- if (IS_ERR(bank->dbck)) {
- dev_err(dev,
- "Could not get gpio dbck. Disable debounce\n");
- bank->dbck_flag = false;
- } else {
- clk_prepare(bank->dbck);
- }
- }
-
- platform_set_drvdata(pdev, bank);
-
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
-
- if (bank->is_mpuio)
- omap_mpuio_init(bank);
-
- omap_gpio_mod_init(bank);
-
- ret = omap_gpio_chip_init(bank, irqc);
- if (ret) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- if (bank->dbck_flag)
- clk_unprepare(bank->dbck);
- return ret;
- }
-
- omap_gpio_show_rev(bank);
+ writel_relaxed(bank->context.wake_en,
+ bank->base + bank->regs->wkup_en);
+ writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0,
+ bank->base + bank->regs->leveldetect0);
+ writel_relaxed(bank->context.leveldetect1,
+ bank->base + bank->regs->leveldetect1);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ writel_relaxed(bank->context.dataout,
+ bank->base + bank->regs->set_dataout);
+ else
+ writel_relaxed(bank->context.dataout,
+ bank->base + bank->regs->dataout);
+ writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
- if (bank->funcs.idle_enable_level_quirk &&
- bank->funcs.idle_disable_level_quirk) {
- bank->nb.notifier_call = gpio_omap_cpu_notifier;
- cpu_pm_register_notifier(&bank->nb);
+ if (bank->dbck_enable_mask) {
+ writel_relaxed(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ writel_relaxed(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
}
- pm_runtime_put(dev);
-
- return 0;
-}
-
-static int omap_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_bank *bank = platform_get_drvdata(pdev);
-
- if (bank->nb.notifier_call)
- cpu_pm_unregister_notifier(&bank->nb);
- list_del(&bank->node);
- gpiochip_remove(&bank->chip);
- pm_runtime_disable(&pdev->dev);
- if (bank->dbck_flag)
- clk_unprepare(bank->dbck);
-
- return 0;
+ writel_relaxed(bank->context.irqenable1,
+ bank->base + bank->regs->irqenable);
+ writel_relaxed(bank->context.irqenable2,
+ bank->base + bank->regs->irqenable2);
}
-static void omap_gpio_restore_context(struct gpio_bank *bank);
-
static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
- u32 l1 = 0, l2 = 0;
+ void __iomem *base = bank->base;
+ u32 nowake;
- if (bank->funcs.idle_enable_level_quirk)
- bank->funcs.idle_enable_level_quirk(bank);
+ bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
@@ -1456,22 +1288,15 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
goto update_gpio_context_count;
/*
- * If going to OFF, remove triggering for all
+ * If going to OFF, remove triggering for all wkup domain
* non-wakeup GPIOs. Otherwise spurious IRQs will be
* generated. See OMAP2420 Errata item 1.101.
*/
- bank->saved_datain = readl_relaxed(bank->base +
- bank->regs->datain);
- l1 = bank->context.fallingdetect;
- l2 = bank->context.risingdetect;
-
- l1 &= ~bank->enabled_non_wakeup_gpios;
- l2 &= ~bank->enabled_non_wakeup_gpios;
-
- writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
- writel_relaxed(l2, bank->base + bank->regs->risingdetect);
-
- bank->workaround_enabled = true;
+ if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
+ nowake = bank->enabled_non_wakeup_gpios;
+ omap_gpio_rmw(base, bank->regs->fallingdetect, nowake, ~nowake);
+ omap_gpio_rmw(base, bank->regs->risingdetect, nowake, ~nowake);
+ }
update_gpio_context_count:
if (bank->get_context_loss_count)
@@ -1481,8 +1306,6 @@ update_gpio_context_count:
omap_gpio_dbck_disable(bank);
}
-static void omap_gpio_init_context(struct gpio_bank *p);
-
static void omap_gpio_unidle(struct gpio_bank *bank)
{
struct device *dev = bank->chip.parent;
@@ -1504,9 +1327,6 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
omap_gpio_dbck_enable(bank);
- if (bank->funcs.idle_disable_level_quirk)
- bank->funcs.idle_disable_level_quirk(bank);
-
if (bank->loses_context) {
if (!bank->get_context_loss_count) {
omap_gpio_restore_context(bank);
@@ -1518,11 +1338,14 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
return;
}
}
+ } else {
+ /* Restore changes done for OMAP2420 errata 1.101 */
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
}
- if (!bank->workaround_enabled)
- return;
-
l = readl_relaxed(bank->base + bank->regs->datain);
/*
@@ -1572,117 +1395,35 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
}
-
- bank->workaround_enabled = false;
-}
-
-static void omap_gpio_init_context(struct gpio_bank *p)
-{
- struct omap_gpio_reg_offs *regs = p->regs;
- void __iomem *base = p->base;
-
- p->context.ctrl = readl_relaxed(base + regs->ctrl);
- p->context.oe = readl_relaxed(base + regs->direction);
- p->context.wake_en = readl_relaxed(base + regs->wkup_en);
- p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
- p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
- p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
- p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
- p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
- p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
-
- if (regs->set_dataout && p->regs->clr_dataout)
- p->context.dataout = readl_relaxed(base + regs->set_dataout);
- else
- p->context.dataout = readl_relaxed(base + regs->dataout);
-
- p->context_valid = true;
-}
-
-static void omap_gpio_restore_context(struct gpio_bank *bank)
-{
- writel_relaxed(bank->context.wake_en,
- bank->base + bank->regs->wkup_en);
- writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
- writel_relaxed(bank->context.leveldetect0,
- bank->base + bank->regs->leveldetect0);
- writel_relaxed(bank->context.leveldetect1,
- bank->base + bank->regs->leveldetect1);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->set_dataout);
- else
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->dataout);
- writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
-
- if (bank->dbck_enable_mask) {
- writel_relaxed(bank->context.debounce, bank->base +
- bank->regs->debounce);
- writel_relaxed(bank->context.debounce_en,
- bank->base + bank->regs->debounce_en);
- }
-
- writel_relaxed(bank->context.irqenable1,
- bank->base + bank->regs->irqenable);
- writel_relaxed(bank->context.irqenable2,
- bank->base + bank->regs->irqenable2);
}
-static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+static int gpio_omap_cpu_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
{
- struct gpio_bank *bank = dev_get_drvdata(dev);
+ struct gpio_bank *bank;
unsigned long flags;
- int error = 0;
-
- raw_spin_lock_irqsave(&bank->lock, flags);
- /* Must be idled only by CPU_CLUSTER_PM_ENTER? */
- if (bank->irq_usage) {
- error = -EBUSY;
- goto unlock;
- }
- omap_gpio_idle(bank, true);
- bank->is_suspended = true;
-unlock:
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return error;
-}
-static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
-{
- struct gpio_bank *bank = dev_get_drvdata(dev);
- unsigned long flags;
- int error = 0;
+ bank = container_of(nb, struct gpio_bank, nb);
raw_spin_lock_irqsave(&bank->lock, flags);
- /* Must be unidled only by CPU_CLUSTER_PM_ENTER? */
- if (bank->irq_usage) {
- error = -EBUSY;
- goto unlock;
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_idle(bank, true);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_unidle(bank);
+ break;
}
- omap_gpio_unidle(bank);
- bank->is_suspended = false;
-unlock:
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return error;
+ return NOTIFY_OK;
}
-#ifdef CONFIG_ARCH_OMAP2PLUS
-static const struct dev_pm_ops gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
- NULL)
-};
-#else
-static const struct dev_pm_ops gpio_pm_ops;
-#endif /* CONFIG_ARCH_OMAP2PLUS */
-
-#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
.direction = OMAP24XX_GPIO_OE,
@@ -1729,11 +1470,6 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};
-/*
- * Note that omap2 does not currently support idle modes with context loss so
- * no need to add OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER quirk flag to save
- * and restore context.
- */
static const struct omap_gpio_platform_data omap2_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
@@ -1744,14 +1480,12 @@ static const struct omap_gpio_platform_data omap3_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct of_device_id omap_gpio_match[] = {
@@ -1770,15 +1504,187 @@ static const struct of_device_id omap_gpio_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
+
+static int omap_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ const struct omap_gpio_platform_data *pdata;
+ struct gpio_bank *bank;
+ struct irq_chip *irqc;
+ int ret;
+
+ match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+
+ pdata = match ? match->data : dev_get_platdata(dev);
+ if (!pdata)
+ return -EINVAL;
+
+ bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
+
+ irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->irq_startup = omap_gpio_irq_startup,
+ irqc->irq_shutdown = omap_gpio_irq_shutdown,
+ irqc->irq_ack = omap_gpio_ack_irq,
+ irqc->irq_mask = omap_gpio_mask_irq,
+ irqc->irq_unmask = omap_gpio_unmask_irq,
+ irqc->irq_set_type = omap_gpio_irq_type,
+ irqc->irq_set_wake = omap_gpio_wake_enable,
+ irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
+ irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
+ irqc->name = dev_name(&pdev->dev);
+ irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+ irqc->parent_device = dev;
+
+ bank->irq = platform_get_irq(pdev, 0);
+ if (bank->irq <= 0) {
+ if (!bank->irq)
+ bank->irq = -ENXIO;
+ if (bank->irq != -EPROBE_DEFER)
+ dev_err(dev,
+ "can't get irq resource ret=%d\n", bank->irq);
+ return bank->irq;
+ }
+
+ bank->chip.parent = dev;
+ bank->chip.owner = THIS_MODULE;
+ bank->dbck_flag = pdata->dbck_flag;
+ bank->stride = pdata->bank_stride;
+ bank->width = pdata->bank_width;
+ bank->is_mpuio = pdata->is_mpuio;
+ bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
+ bank->regs = pdata->regs;
+#ifdef CONFIG_OF_GPIO
+ bank->chip.of_node = of_node_get(node);
#endif
+ if (node) {
+ if (!of_property_read_bool(node, "ti,gpio-always-on"))
+ bank->loses_context = true;
+ } else {
+ bank->loses_context = pdata->loses_context;
+
+ if (bank->loses_context)
+ bank->get_context_loss_count =
+ pdata->get_context_loss_count;
+ }
+
+ if (bank->regs->set_dataout && bank->regs->clr_dataout) {
+ bank->set_dataout = omap_set_gpio_dataout_reg;
+ bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
+ } else {
+ bank->set_dataout = omap_set_gpio_dataout_mask;
+ bank->set_dataout_multiple =
+ omap_set_gpio_dataout_mask_multiple;
+ }
+
+ raw_spin_lock_init(&bank->lock);
+ raw_spin_lock_init(&bank->wa_lock);
+
+ /* Static mapping, never released */
+ bank->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bank->base)) {
+ return PTR_ERR(bank->base);
+ }
+
+ if (bank->dbck_flag) {
+ bank->dbck = devm_clk_get(dev, "dbclk");
+ if (IS_ERR(bank->dbck)) {
+ dev_err(dev,
+ "Could not get gpio dbck. Disable debounce\n");
+ bank->dbck_flag = false;
+ } else {
+ clk_prepare(bank->dbck);
+ }
+ }
+
+ platform_set_drvdata(pdev, bank);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ if (bank->is_mpuio)
+ omap_mpuio_init(bank);
+
+ omap_gpio_mod_init(bank);
+
+ ret = omap_gpio_chip_init(bank, irqc);
+ if (ret) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
+ return ret;
+ }
+
+ omap_gpio_show_rev(bank);
+
+ bank->nb.notifier_call = gpio_omap_cpu_notifier;
+ cpu_pm_register_notifier(&bank->nb);
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int omap_gpio_remove(struct platform_device *pdev)
+{
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+
+ cpu_pm_unregister_notifier(&bank->nb);
+ list_del(&bank->node);
+ gpiochip_remove(&bank->chip);
+ pm_runtime_disable(&pdev->dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
+
+ return 0;
+}
+
+static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+{
+ struct gpio_bank *bank = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_idle(bank, true);
+ bank->is_suspended = true;
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
+{
+ struct gpio_bank *bank = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_unidle(bank);
+ bank->is_suspended = false;
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops gpio_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
+ NULL)
+};
+
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
.remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
.pm = &gpio_pm_ops,
- .of_match_table = of_match_ptr(omap_gpio_match),
+ .of_match_table = omap_gpio_match,
},
};
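
The reworked gpio-omap probe above registers a CPU PM notifier for every bank instead of selecting level-quirk callbacks through platform data. A stripped-down sketch of that notifier pattern follows; the my_* names are placeholders, not gpio-omap symbols.

#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct my_device {
	struct notifier_block nb;
	/* register shadow copies would live here */
};

static void my_save_context(struct my_device *md) { }
static void my_restore_context(struct my_device *md) { }

/*
 * Called on CPU cluster power transitions: save state before the
 * cluster may power off, restore it when the cluster comes back
 * or when entry failed.
 */
static int my_cpu_pm_notifier(struct notifier_block *nb,
			      unsigned long cmd, void *v)
{
	struct my_device *md = container_of(nb, struct my_device, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		my_save_context(md);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		my_restore_context(md);
		break;
	}

	return NOTIFY_OK;
}

static int my_register_notifier(struct my_device *md)
{
	md->nb.notifier_call = my_cpu_pm_notifier;
	return cpu_pm_register_notifier(&md->nb);
}

gpio-omap additionally guards the idle/unidle calls with bank->is_suspended and the bank spinlock so runtime PM and CPU PM do not race; the sketch omits that locking.
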
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 7e76830b3368..b7ef33f63392 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -73,6 +73,7 @@
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
static const struct i2c_device_id pca953x_id[] = {
+ { "pca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
@@ -153,6 +154,7 @@ struct pca953x_chip {
u8 irq_trig_fall[MAX_BANK];
struct irq_chip irq_chip;
#endif
+ atomic_t wakeup_path;
struct i2c_client *client;
struct gpio_chip gpio_chip;
@@ -581,6 +583,11 @@ static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ if (on)
+ atomic_inc(&chip->wakeup_path);
+ else
+ atomic_dec(&chip->wakeup_path);
+
return irq_set_irq_wake(chip->client->irq, on);
}
@@ -1100,7 +1107,10 @@ static int pca953x_suspend(struct device *dev)
regcache_cache_only(chip->regmap, true);
- regulator_disable(chip->regulator);
+ if (atomic_read(&chip->wakeup_path))
+ device_set_wakeup_path(dev);
+ else
+ regulator_disable(chip->regulator);
return 0;
}
@@ -1110,10 +1120,12 @@ static int pca953x_resume(struct device *dev)
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
- ret = regulator_enable(chip->regulator);
- if (ret != 0) {
- dev_err(dev, "Failed to enable regulator: %d\n", ret);
- return 0;
+ if (!atomic_read(&chip->wakeup_path)) {
+ ret = regulator_enable(chip->regulator);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable regulator: %d\n", ret);
+ return 0;
+ }
}
regcache_cache_only(chip->regmap, false);
@@ -1137,6 +1149,7 @@ static int pca953x_resume(struct device *dev)
#define OF_957X(__nrgpio, __int) (void *)(__nrgpio | PCA957X_TYPE | __int)
static const struct of_device_id pca953x_dt_ids[] = {
+ { .compatible = "nxp,pca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9505", .data = OF_953X(40, PCA_INT), },
{ .compatible = "nxp,pca9534", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "nxp,pca9535", .data = OF_953X(16, PCA_INT), },
@@ -1152,6 +1165,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9575", .data = OF_957X(16, PCA_INT), },
{ .compatible = "nxp,pca9698", .data = OF_953X(40, 0), },
+ { .compatible = "nxp,pcal6416", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
@@ -1167,6 +1181,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index bcc6be4a5cb2..26f77fdb217e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -577,7 +577,7 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-const struct irq_domain_ops pxa_irq_domain_ops = {
+static const struct irq_domain_ops pxa_irq_domain_ops = {
.map = pxa_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
@@ -622,7 +622,6 @@ static int pxa_gpio_probe(struct platform_device *pdev)
{
struct pxa_gpio_chip *pchip;
struct pxa_gpio_bank *c;
- struct resource *res;
struct clk *clk;
struct pxa_gpio_platform_data *info;
void __iomem *gpio_reg_base;
@@ -665,11 +664,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq0 = irq0;
pchip->irq1 = irq1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!gpio_reg_base)
- return -EINVAL;
+ if (IS_ERR(gpio_reg_base))
+ return PTR_ERR(gpio_reg_base);
@@ -816,7 +812,7 @@ static void pxa_gpio_resume(void)
#define pxa_gpio_resume NULL
#endif
-struct syscore_ops pxa_gpio_syscore_ops = {
+static struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 500a3596aaf4..70e95fc4779f 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -430,7 +430,7 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_priv *p;
- struct resource *io, *irq;
+ struct resource *irq;
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
struct device *dev = &pdev->dev;
@@ -461,8 +461,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(dev, io);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base)) {
ret = PTR_ERR(p->base);
goto err0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index c333046d02b8..fb143f28c386 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -23,7 +23,6 @@ struct sch_gpio {
struct gpio_chip chip;
spinlock_t lock;
unsigned short iobase;
- unsigned short core_base;
unsigned short resume_base;
};
@@ -166,7 +165,6 @@ static int sch_gpio_probe(struct platform_device *pdev)
switch (pdev->id) {
case PCI_DEVICE_ID_INTEL_SCH_LPC:
- sch->core_base = 0;
sch->resume_base = 10;
sch->chip.ngpio = 14;
@@ -185,19 +183,16 @@ static int sch_gpio_probe(struct platform_device *pdev)
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
- sch->core_base = 0;
sch->resume_base = 5;
sch->chip.ngpio = 14;
break;
case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
- sch->core_base = 0;
sch->resume_base = 21;
sch->chip.ngpio = 30;
break;
case PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB:
- sch->core_base = 0;
sch->resume_base = 2;
sch->chip.ngpio = 8;
break;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index ee3039f091f4..6eca531b7d96 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -122,15 +122,13 @@ static int spics_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_spics *spics;
- struct resource *res;
int ret;
spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL);
if (!spics)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spics->base = devm_ioremap_resource(&pdev->dev, res);
+ spics->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spics->base))
return PTR_ERR(spics->base);
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 55072d2b367f..f5c8b3a351d5 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -219,7 +219,6 @@ static int sprd_gpio_probe(struct platform_device *pdev)
{
struct gpio_irq_chip *irq;
struct sprd_gpio *sprd_gpio;
- struct resource *res;
int ret;
sprd_gpio = devm_kzalloc(&pdev->dev, sizeof(*sprd_gpio), GFP_KERNEL);
@@ -232,8 +231,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
return sprd_gpio->irq;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sprd_gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ sprd_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sprd_gpio->base))
return PTR_ERR(sprd_gpio->base);
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 2283c869ad5d..a51c310708b8 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -360,7 +360,6 @@ static int gsta_probe(struct platform_device *dev)
struct pci_dev *pdev;
struct sta2x11_gpio_pdata *gpio_pdata;
struct gsta_gpio *chip;
- struct resource *res;
pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
gpio_pdata = dev_get_platdata(&pdev->dev);
@@ -369,13 +368,11 @@ static int gsta_probe(struct platform_device *dev)
dev_err(&dev->dev, "no gpio config\n");
pr_debug("gpio config: %p\n", gpio_pdata);
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-
chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->dev = &dev->dev;
- chip->reg_base = devm_ioremap_resource(&dev->dev, res);
+ chip->reg_base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(chip->reg_base))
return PTR_ERR(chip->reg_base);
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 19972084c45b..8a319d56c5de 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -210,7 +210,6 @@ static int xway_stp_hw_init(struct xway_stp *chip)
static int xway_stp_probe(struct platform_device *pdev)
{
- struct resource *res;
u32 shadow, groups, dsl, phy;
struct xway_stp *chip;
struct clk *clk;
@@ -220,8 +219,7 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->virt = devm_ioremap_resource(&pdev->dev, res);
+ chip->virt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->virt))
return PTR_ERR(chip->virt);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index d5e5d19f4c0a..6bbac6c83f29 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -120,7 +120,6 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret = -EBUSY;
@@ -136,8 +135,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (tb10x_gpio == NULL)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tb10x_gpio->base = devm_ioremap_resource(dev, mem);
+ tb10x_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 1ececf2c3282..6d9b6906b9d0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -569,7 +569,6 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
- struct resource *res;
struct tegra_gpio_bank *bank;
unsigned int gpio, i, j;
int ret;
@@ -645,8 +644,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank->tgi = tgi;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tgi->regs = devm_ioremap_resource(&pdev->dev, res);
+ tgi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tgi->regs))
return PTR_ERR(tgi->regs);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 314e300d6ba3..1c70e831069c 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -229,7 +229,6 @@ static int timbgpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
struct timbgpio *tgpio;
- struct resource *iomem;
struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
int irq = platform_get_irq(pdev, 0);
@@ -246,8 +245,7 @@ static int timbgpio_probe(struct platform_device *pdev)
spin_lock_init(&tgpio->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tgpio->membase = devm_ioremap_resource(dev, iomem);
+ tgpio->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tgpio->membase))
return PTR_ERR(tgpio->membase);
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index c2a80b4cbf32..8c0d82d926dd 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -23,7 +23,6 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
{
struct device_node *node;
struct gpio_chip *chip;
- struct resource *res;
void __iomem *base_addr;
int retval;
u32 ngpios;
@@ -32,8 +31,7 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_addr = devm_ioremap_resource(&pdev->dev, res);
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 0f662b297a95..93cdcc41e9fb 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -346,7 +346,6 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
struct uniphier_gpio_priv *priv;
struct gpio_chip *chip;
struct irq_chip *irq_chip;
- struct resource *regs;
unsigned int nregs;
u32 ngpios;
int ret;
@@ -370,8 +369,7 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, regs);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 541fa6ac399d..30aef41e3b7e 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -29,6 +29,7 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
+ struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -60,8 +61,6 @@ struct vf610_gpio_port {
#define PORT_INT_EITHER_EDGE 0xb
#define PORT_INT_LOGIC_ONE 0xc
-static struct irq_chip vf610_gpio_irq_chip;
-
static const struct fsl_gpio_soc_data imx_data = {
.have_paddr = true,
};
@@ -86,28 +85,24 @@ static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
unsigned long mask = BIT(gpio);
- void __iomem *addr;
+ unsigned long offset = GPIO_PDIR;
if (port->sdata && port->sdata->have_paddr) {
mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- addr = mask ? port->gpio_base + GPIO_PDOR :
- port->gpio_base + GPIO_PDIR;
- return !!(vf610_gpio_readl(addr) & BIT(gpio));
- } else {
- return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR)
- & BIT(gpio));
+ if (mask)
+ offset = GPIO_PDOR;
}
+
+ return !!(vf610_gpio_readl(port->gpio_base + offset) & BIT(gpio));
}
static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
unsigned long mask = BIT(gpio);
+ unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
- if (val)
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PSOR);
- else
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PCOR);
+ vf610_gpio_writel(mask, port->gpio_base + offset);
}
static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -237,37 +232,31 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
-static struct irq_chip vf610_gpio_irq_chip = {
- .name = "gpio-vf610",
- .irq_ack = vf610_gpio_irq_ack,
- .irq_mask = vf610_gpio_irq_mask,
- .irq_unmask = vf610_gpio_irq_unmask,
- .irq_set_type = vf610_gpio_irq_set_type,
- .irq_set_wake = vf610_gpio_irq_set_wake,
-};
+static void vf610_gpio_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
static int vf610_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
- struct resource *iores;
struct gpio_chip *gc;
+ struct irq_chip *ic;
int i;
int ret;
- port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->sdata = of_device_get_match_data(dev);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->base = devm_ioremap_resource(dev, iores);
+ port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- port->gpio_base = devm_ioremap_resource(dev, iores);
+ port->gpio_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(port->gpio_base))
return PTR_ERR(port->gpio_base);
@@ -275,11 +264,15 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (port->irq < 0)
return port->irq;
- port->clk_port = devm_clk_get(&pdev->dev, "port");
+ port->clk_port = devm_clk_get(dev, "port");
if (!IS_ERR(port->clk_port)) {
ret = clk_prepare_enable(port->clk_port);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, vf610_gpio_disable_clk,
+ port->clk_port);
+ if (ret)
+ return ret;
} else if (port->clk_port == ERR_PTR(-EPROBE_DEFER)) {
/*
* Percolate deferrals, for anything else,
@@ -288,20 +281,19 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return PTR_ERR(port->clk_port);
}
- port->clk_gpio = devm_clk_get(&pdev->dev, "gpio");
+ port->clk_gpio = devm_clk_get(dev, "gpio");
if (!IS_ERR(port->clk_gpio)) {
ret = clk_prepare_enable(port->clk_gpio);
- if (ret) {
- clk_disable_unprepare(port->clk_port);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, vf610_gpio_disable_clk,
+ port->clk_gpio);
+ if (ret)
return ret;
- }
} else if (port->clk_gpio == ERR_PTR(-EPROBE_DEFER)) {
- clk_disable_unprepare(port->clk_port);
return PTR_ERR(port->clk_gpio);
}
- platform_set_drvdata(pdev, port);
-
gc = &port->gc;
gc->of_node = np;
gc->parent = dev;
@@ -316,7 +308,15 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ret = gpiochip_add_data(gc, port);
+ ic = &port->ic;
+ ic->name = "gpio-vf610";
+ ic->irq_ack = vf610_gpio_irq_ack;
+ ic->irq_mask = vf610_gpio_irq_mask;
+ ic->irq_unmask = vf610_gpio_irq_unmask;
+ ic->irq_set_type = vf610_gpio_irq_set_type;
+ ic->irq_set_wake = vf610_gpio_irq_set_wake;
+
+ ret = devm_gpiochip_add_data(dev, gc, port);
if (ret < 0)
return ret;
@@ -327,39 +327,23 @@ static int vf610_gpio_probe(struct platform_device *pdev)
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
- ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add irqchip\n");
- gpiochip_remove(gc);
return ret;
}
- gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
+ gpiochip_set_chained_irqchip(gc, ic, port->irq,
vf610_gpio_irq_handler);
return 0;
}
-static int vf610_gpio_remove(struct platform_device *pdev)
-{
- struct vf610_gpio_port *port = platform_get_drvdata(pdev);
-
- gpiochip_remove(&port->gc);
- if (!IS_ERR(port->clk_port))
- clk_disable_unprepare(port->clk_port);
- if (!IS_ERR(port->clk_gpio))
- clk_disable_unprepare(port->clk_gpio);
-
- return 0;
-}
-
static struct platform_driver vf610_gpio_driver = {
.driver = {
.name = "gpio-vf610",
.of_match_table = vf610_gpio_dt_ids,
},
.probe = vf610_gpio_probe,
- .remove = vf610_gpio_remove,
};
builtin_platform_driver(vf610_gpio_driver);
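
The gpio-vf610 conversion above is able to delete its .remove callback because every resource is now released automatically: the clocks through devm_add_action_or_reset() and the gpiochip through devm_gpiochip_add_data(). A small sketch of the clock half of that pattern, again with my_* placeholders:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static void my_disable_clk(void *data)
{
	clk_disable_unprepare(data);
}

/*
 * Enable a clock and register an undo action so it is disabled
 * automatically on probe failure or device removal.
 */
static int my_enable_clk(struct platform_device *pdev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return devm_add_action_or_reset(&pdev->dev, my_disable_clk, clk);
}

devm_add_action_or_reset() runs the action immediately if it cannot register it, which is why the probe can simply return its error code without unwinding by hand.
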
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2eb76f35aa7e..641a05181017 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -229,7 +229,6 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv;
int ret;
- struct resource *res;
void __iomem *regs;
struct irq_domain *parent_domain = NULL;
u32 val32;
@@ -238,8 +237,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 0a3607fd21af..54d3359444f3 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -290,22 +290,17 @@ MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
static int xlp_gpio_probe(struct platform_device *pdev)
{
struct gpio_chip *gc;
- struct resource *iores;
struct xlp_gpio_priv *priv;
void __iomem *gpio_base;
int irq_base, irq, err;
int ngpio;
u32 soc_type;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENODEV;
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- gpio_base = devm_ioremap_resource(&pdev->dev, iores);
+ gpio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 5eacad9b2692..fb927559aefa 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -218,15 +218,13 @@ static int zx_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zx_gpio *chip;
- struct resource *res;
int irq, id, ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 00ff7b1fa8a1..9392edaeec3f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -834,7 +834,6 @@ static int zynq_gpio_probe(struct platform_device *pdev)
int ret, bank_num;
struct zynq_gpio *gpio;
struct gpio_chip *chip;
- struct resource *res;
const struct of_device_id *match;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -849,8 +848,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
gpio->p_data = match->data;
platform_set_drvdata(pdev, gpio);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base_addr))
return PTR_ERR(gpio->base_addr);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 30d0baf7ddae..c9fc9e232aaf 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -24,13 +24,13 @@
*
* @node: list-entry of the events list of the struct acpi_gpio_chip
* @handle: handle of ACPI method to execute when the IRQ triggers
- * @handler: irq_handler to pass to request_irq when requesting the IRQ
- * @pin: GPIO pin number on the gpio_chip
- * @irq: Linux IRQ number for the event, for request_ / free_irq
- * @irqflags: flags to pass to request_irq when requesting the IRQ
+ * @handler: handler function to pass to request_irq() when requesting the IRQ
+ * @pin: GPIO pin number on the struct gpio_chip
+ * @irq: Linux IRQ number for the event, for request_irq() / free_irq()
+ * @irqflags: flags to pass to request_irq() when requesting the IRQ
* @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
- * @irq_requested:True if request_irq has been done
- * @desc: gpio_desc for the GPIO pin for this event
+ * @irq_requested:True if request_irq() has been done
+ * @desc: struct gpio_desc for the GPIO pin for this event
*/
struct acpi_gpio_event {
struct list_head node;
@@ -65,10 +65,10 @@ struct acpi_gpio_chip {
};
/*
- * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
* (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains gpiochips
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
* for which the acpi_gpiochip_request_irqs() call has been deferred.
*/
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
@@ -90,7 +90,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
*
* Return: GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR
* error value. Specifically returns %-EPROBE_DEFER if the referenced GPIO
- * controller does not have gpiochip registered at the moment. This is to
+ * controller does not have GPIO chip registered at the moment. This is to
* support probe deferral.
*/
static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
@@ -287,9 +287,9 @@ fail_free_desc:
*
* ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
* handled by ACPI event methods which need to be called from the GPIO
- * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
- * gpio pins have acpi event methods and assigns interrupt handlers that calls
- * the acpi event methods for those pins.
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
+ * GPIO pins have ACPI event methods and assigns interrupt handlers that call
+ * the ACPI event methods for those pins.
*/
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
{
@@ -444,8 +444,6 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
static enum gpiod_flags
acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
{
- bool pull_up = agpio->pin_config == ACPI_PIN_CONFIG_PULLUP;
-
switch (agpio->io_restriction) {
case ACPI_IO_RESTRICT_INPUT:
return GPIOD_IN;
@@ -454,16 +452,26 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
* ACPI GPIO resources don't contain an initial value for the
* GPIO. Therefore we deduce that value from the pull field
* instead. If the pin is pulled up we assume default to be
- * high, otherwise low.
+ * high, if it is pulled down we assume default to be low,
+ * otherwise we leave the pin untouched.
*/
- return pull_up ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ switch (agpio->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ return GPIOD_OUT_HIGH;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ return GPIOD_OUT_LOW;
+ default:
+ break;
+ }
default:
- /*
- * Assume that the BIOS has configured the direction and pull
- * accordingly.
- */
- return GPIOD_ASIS;
+ break;
}
+
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
}
static int
@@ -517,6 +525,26 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf
return ret;
}
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ switch (info->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ *lookupflags |= GPIO_PULL_UP;
+ break;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ *lookupflags |= GPIO_PULL_DOWN;
+ break;
+ default:
+ break;
+ }
+
+ if (info->polarity == GPIO_ACTIVE_LOW)
+ *lookupflags |= GPIO_ACTIVE_LOW;
+
+ return 0;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
@@ -550,6 +578,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
+ lookup->info.pin_config = agpio->pin_config;
lookup->info.gpioint = gpioint;
/*
@@ -653,7 +682,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* that case @index is used to select the GPIO entry in the property value
* (in case of multiple).
*
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
* returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
@@ -696,7 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags)
+ unsigned long *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
@@ -737,10 +766,8 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
return ERR_PTR(-ENOENT);
}
- if (info.polarity == GPIO_ACTIVE_LOW)
- *lookupflags |= GPIO_ACTIVE_LOW;
-
acpi_gpio_update_gpiod_flags(dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
return desc;
}
@@ -751,10 +778,13 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
* @index: index of GpioIo/GpioInt resource (starting from %0)
* @info: info pointer to fill in (optional)
*
- * If @fwnode is an ACPI device object, call %acpi_get_gpiod_by_index() for it.
- * Otherwise (ie. it is a data-only non-device object), use the property-based
+ * If @fwnode is an ACPI device object, call acpi_get_gpiod_by_index() for it.
+ * Otherwise (i.e. it is a data-only non-device object), use the property-based
* GPIO lookup to get to the GPIO resource with the relevant information and use
* that to obtain the GPIO descriptor to return.
+ *
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
+ * returned.
*/
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
@@ -816,6 +846,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return PTR_ERR(desc);
if (info.gpioint && idx++ == index) {
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
char label[32];
int irq;
@@ -827,7 +858,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return irq;
snprintf(label, sizeof(label), "GpioInt() %d", index);
- ret = gpiod_configure_flags(desc, label, 0, info.flags);
+ ret = gpiod_configure_flags(desc, label, lflags, info.flags);
if (ret < 0)
return ret;
@@ -992,16 +1023,19 @@ static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
}
}
-static struct gpio_desc *acpi_gpiochip_parse_own_gpio(
- struct acpi_gpio_chip *achip, struct fwnode_handle *fwnode,
- const char **name, unsigned int *lflags, unsigned int *dflags)
+static struct gpio_desc *
+acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
+ struct fwnode_handle *fwnode,
+ const char **name,
+ unsigned long *lflags,
+ enum gpiod_flags *dflags)
{
struct gpio_chip *chip = achip->chip;
struct gpio_desc *desc;
u32 gpios[2];
int ret;
- *lflags = 0;
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
*dflags = 0;
*name = NULL;
@@ -1037,7 +1071,8 @@ static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
struct fwnode_handle *fwnode;
device_for_each_child_node(chip->parent, fwnode) {
- unsigned int lflags, dflags;
+ unsigned long lflags;
+ enum gpiod_flags dflags;
struct gpio_desc *desc;
const char *name;
int ret;
@@ -1158,11 +1193,13 @@ static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
}
/**
- * acpi_gpio_count - return the number of GPIOs associated with a
- * device / function or -ENOENT if no GPIO has been
- * assigned to the requested function.
- * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * acpi_gpio_count - count the GPIOs associated with a device / function
+ * @dev: GPIO consumer, can be %NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
+ *
+ * Return:
+ * The number of GPIOs associated with a device / function or %-ENOENT,
+ * if no GPIO has been assigned to the requested function.
*/
int acpi_gpio_count(struct device *dev, const char *con_id)
{
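A minimal illustrative sketch (not part of the series above) of how the new acpi_gpio_update_gpiod_lookup_flags() helper folds the ACPI-provided bias and polarity into the unsigned long lookup-flags bitmask; the acpi_gpio_info values and the function name below are invented for the example:

/* Hypothetical caller; the info contents are made up for illustration. */
static void example_acpi_bias_to_lookup_flags(void)
{
	struct acpi_gpio_info info = {
		.pin_config = ACPI_PIN_CONFIG_PULLUP,
		.polarity = GPIO_ACTIVE_LOW,
	};
	unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;

	acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
	/* lflags now has GPIO_PULL_UP and GPIO_ACTIVE_LOW set */
}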
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 6a3ec575a404..aec7bd86ae7e 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -86,9 +86,9 @@ static void of_gpio_flags_quirks(struct device_node *np,
if (IS_ENABLED(CONFIG_REGULATOR) &&
(of_device_is_compatible(np, "regulator-fixed") ||
of_device_is_compatible(np, "reg-fixed-voltage") ||
- (of_device_is_compatible(np, "regulator-gpio") &&
- !(strcmp(propname, "enable-gpio") &&
- strcmp(propname, "enable-gpios"))))) {
+ (!(strcmp(propname, "enable-gpio") &&
+ strcmp(propname, "enable-gpios")) &&
+ of_device_is_compatible(np, "regulator-gpio")))) {
/*
* The regulator GPIO handles are specified such that the
* presence or absence of "enable-active-high" solely controls
@@ -119,9 +119,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* property named "cs-gpios" we need to inspect the child node
* to determine if the flags should have inverted semantics.
*/
- if (IS_ENABLED(CONFIG_SPI_MASTER) &&
- of_property_read_bool(np, "cs-gpios") &&
- !strcmp(propname, "cs-gpios")) {
+ if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ of_property_read_bool(np, "cs-gpios")) {
struct device_node *child;
u32 cs;
int ret;
@@ -288,8 +287,7 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
}
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned int idx, unsigned long *flags)
{
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
@@ -362,8 +360,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
* @chip: GPIO chip whose hog is parsed
* @idx: Index of the GPIO to parse
* @name: GPIO line name
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_parse_own_gpio()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_parse_own_gpio()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
@@ -372,7 +370,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
struct gpio_chip *chip,
unsigned int idx, const char **name,
- enum gpio_lookup_flags *lflags,
+ unsigned long *lflags,
enum gpiod_flags *dflags)
{
struct device_node *chip_np;
@@ -388,7 +386,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
return ERR_PTR(-EINVAL);
xlate_flags = 0;
- *lflags = 0;
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
*dflags = 0;
ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
@@ -445,7 +443,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
struct gpio_desc *desc = NULL;
struct device_node *np;
const char *name;
- enum gpio_lookup_flags lflags;
+ unsigned long lflags;
enum gpiod_flags dflags;
unsigned int i;
int ret;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bca3e7740ef6..e013d417a936 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2519,6 +2519,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label,
enum gpiod_flags flags)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
int err;
@@ -2531,7 +2532,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
if (err < 0)
return ERR_PTR(err);
- err = gpiod_configure_flags(desc, label, 0, flags);
+ err = gpiod_configure_flags(desc, label, lflags, flags);
if (err) {
chip_err(chip, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
@@ -2569,8 +2570,20 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
enum pin_config_param mode)
{
- unsigned long config = { PIN_CONF_PACKED(mode, 0) };
+ unsigned long config;
+ unsigned arg;
+ switch (mode) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+
+ default:
+ arg = 0;
+ }
+
+ config = PIN_CONF_PACKED(mode, arg);
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
@@ -3915,8 +3928,7 @@ found:
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned int idx, unsigned long *flags)
{
struct gpio_desc *desc = ERR_PTR(-ENOENT);
struct gpiod_lookup_table *table;
@@ -4072,8 +4084,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -4155,9 +4167,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
+ unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = NULL;
int status;
- enum gpio_lookup_flags lookupflags = 0;
/* Maybe we have a device name, maybe not */
const char *devname = dev ? dev_name(dev) : "?";
@@ -4242,8 +4254,8 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
enum gpiod_flags dflags,
const char *label)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc;
- unsigned long lflags = 0;
enum of_gpio_flags flags;
bool active_low = false;
bool single_ended = false;
@@ -4321,8 +4333,8 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
enum gpiod_flags dflags,
const char *label)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = ERR_PTR(-ENODEV);
- unsigned long lflags = 0;
int ret;
if (!fwnode)
@@ -4342,9 +4354,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
return desc;
acpi_gpio_update_gpiod_flags(&dflags, &info);
-
- if (info.polarity == GPIO_ACTIVE_LOW)
- lflags |= GPIO_ACTIVE_LOW;
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
}
/* Currently only ACPI takes this path */
@@ -4395,8 +4405,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
* gpiod_hog - Hog the specified GPIO desc given the provided flags
* @desc: gpio whose value will be assigned
* @name: gpio line name
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*/
int gpiod_hog(struct gpio_desc *desc, const char *name,
@@ -4449,8 +4459,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/**
* gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog
* @chip: gpio chip to act on
- *
- * This is only used by of_gpiochip_remove to free hogged gpios
*/
static void gpiochip_free_hogs(struct gpio_chip *chip)
{
@@ -4620,7 +4628,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- gpiod_free(desc);
+ if (desc)
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
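As a side note to the gpio_set_config() rework above, a hedged sketch of what the packed config now carries for the pull biases, using only the generic helpers from <linux/pinctrl/pinconf-generic.h>; the example function is invented:

#include <linux/pinctrl/pinconf-generic.h>

static unsigned long example_packed_pull_config(void)
{
	/*
	 * Pull biases are now packed with argument 1 instead of 0, so
	 * pinconf_to_config_param() yields PIN_CONFIG_BIAS_PULL_UP and
	 * pinconf_to_config_argument() yields 1 for the returned value.
	 */
	return PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1);
}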
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 3243c1eb5c88..7a65dad43932 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -17,7 +17,6 @@
#include <linux/cdev.h>
enum of_gpio_flags;
-enum gpio_lookup_flags;
struct acpi_device;
/**
@@ -75,6 +74,7 @@ struct gpio_device {
* @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
@@ -83,6 +83,7 @@ struct acpi_gpio_info {
struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
+ int pin_config;
int polarity;
int triggering;
unsigned int quirks;
@@ -95,7 +96,7 @@ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags);
+ unsigned long *lookupflags);
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
int of_gpiochip_add(struct gpio_chip *gc);
@@ -104,7 +105,7 @@ void of_gpiochip_remove(struct gpio_chip *gc);
static inline struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned long *lookupflags)
{
return ERR_PTR(-ENOENT);
}
@@ -126,12 +127,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
struct acpi_gpio_info *info);
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags);
+ unsigned long *lookupflags);
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_info *info);
@@ -154,11 +157,17 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf
{
return 0;
}
+static inline int
+acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ return 0;
+}
static inline struct gpio_desc *
acpi_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags)
+ unsigned long *lookupflags)
{
return ERR_PTR(-ENOENT);
}
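Since lookup flags are now a plain bitmask throughout gpiolib, polarity and bias can be combined in a single value; a hedged board-file sketch under invented device and chip names:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_gpios = {
	.dev_id = "example-device.0",		/* invented consumer name */
	.table = {
		/* line 17 of "gpiochip0" as active-low "reset" with pull-up */
		GPIO_LOOKUP("gpiochip0", 17, "reset",
			    GPIO_ACTIVE_LOW | GPIO_PULL_UP),
		{ },
	},
};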
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2267e84d5cb4..e360a4a131e1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -200,7 +200,6 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
@@ -221,7 +220,6 @@ config DRM_AMDGPU
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
select CHASH
help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
+ if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3e6823fdd939..58ed401c5996 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, range->blockable))
+ if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, range->blockable))
+ if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95144e49c7f9..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -342,6 +342,16 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
+ /* profile_exit setting is valid only when current mode is in profile mode */
+ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+ pr_err("Currently not in any profile mode!\n");
+ return -EINVAL;
+ }
+
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 905cce1814f3..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
psp_set_funcs(adev);
- return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct psp_context *psp = &adev->psp;
- int ret;
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
+ return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+ int ret;
+
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a07c85815b7a..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2757,6 +2757,37 @@ error_free_sched_entity:
}
/**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * it means page tables are being created and filled, so the VM is not
+ * clean.
+ *
+ * Returns:
+ * 0 if this VM is clean, -EINVAL if any page table is already allocated
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ enum amdgpu_vm_level root = adev->vm_manager.root_level;
+ unsigned int entries = amdgpu_vm_num_entries(adev, root);
+ unsigned int i = 0;
+
+ if (!(vm->root.entries))
+ return 0;
+
+ for (i = 0; i < entries; i++) {
+ if (vm->root.entries[i].base.bo)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
* @adev: amdgpu_device pointer
@@ -2786,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
return r;
/* Sanity checks */
- if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
- r = -EINVAL;
+ r = amdgpu_vm_check_clean_reserved(adev, vm);
+ if (r)
goto unreserve_bo;
- }
if (pasid) {
unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8dbad496b29f..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -372,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
if (amdgpu_sriov_runtime(adev))
schedule_work(&adev->virt.flr_work);
break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
* it so far since that polling thread will handle it,
* other msg like flr complete is not handled here.
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 39d151b79153..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -49,6 +49,7 @@ enum idh_event {
IDH_FLR_NOTIFICATION_CMPL,
IDH_SUCCESS,
IDH_FAIL,
+ IDH_QUERY_ALIVE,
IDH_EVENT_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
0xFFFFFFFF, 0x00000004);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
offset = 0;
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr));
offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
}
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f3f5938430d4..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+ offset = AMDGPU_VCE_FIRMWARE_OFFSET;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+ uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+ uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
- mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+ (tmr_mc_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(adev->vce.gpu_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+ offset & ~0x0f000000);
+
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
(adev->vce.gpu_addr >> 40) & 0xff);
- offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
- offset & ~0x0f000000);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1b2f69a9a24e..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
#include "soc15_common.h"
#include "vega10_ih.h"
-
+#define MAX_REARM_RETRY 10
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -382,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: pointer to the IH ring to rearm
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* vega10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -395,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega10_ih_irq_rearm(adev, ih);
} else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
} else if (ih == &adev->irq.ih1) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 2cb09e088dce..769dbc7be8cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1272,8 +1272,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
- dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
- gpu->pdev->devfn);
+ dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1854506e3e8f..995f9df66142 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5242,7 +5242,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i, r;
+ uint32_t i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5253,6 +5253,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
+ long r;
unsigned long flags;
struct amdgpu_bo *abo;
uint64_t tiling_flags;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8840f396a7b6..3dff9997f5e3 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -76,7 +76,6 @@ config DRM_PARADE_PS8622
depends on OF
select DRM_PANEL
select DRM_KMS_HELPER
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
Parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec2ca71e1323..c532e9c9e491 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -748,11 +748,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (mode->vrefresh <= 24000)
+ if (drm_mode_vrefresh(mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (mode->vrefresh <= 25000)
+ else if (drm_mode_vrefresh(mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (mode->vrefresh <= 30000)
+ else if (drm_mode_vrefresh(mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index 14a72c4c496d..dc825883400d 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -2,7 +2,6 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 148be8e1a090..3d5f1cb6a76c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -15,7 +15,6 @@ config DRM_I915
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
struct dentry *ent;
- char name[10] = "";
+ char name[16] = "";
- sprintf(name, "vgpu%d", vgpu->id);
+ snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
if (!vgpu->debugfs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4e1e425189ba..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
int i, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
+ u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ page_num = obj->base.size >> PAGE_SHIFT;
+ ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
return ret;
}
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
- for_each_sg(st->sgl, sg, fb_info->size, i) {
+ for_each_sg(st->sgl, sg, page_num, i) {
sg->offset = 0;
sg->length = PAGE_SIZE;
sg_dma_address(sg) =
@@ -158,7 +160,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return NULL;
drm_gem_private_object_init(dev, &obj->base,
- info->size << PAGE_SHIFT);
+ roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,11 +208,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu_fb_info *info,
int plane_id)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
int ret, tile_height = 1;
+ memset(info, 0, sizeof(*info));
+
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
if (ret)
@@ -267,8 +270,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
return -EINVAL;
}
- info->size = (info->stride * roundup(info->height, tile_height)
- + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ info->size = info->stride * roundup(info->height, tile_height);
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
@@ -278,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
return -EFAULT;
}
- if (((info->start >> PAGE_SHIFT) + info->size) >
- ggtt_total_entries(&dev_priv->ggtt)) {
- gvt_vgpu_err("Invalid GTT offset or size\n");
- return -EFAULT;
- }
if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
gvt_vgpu_err("invalid gma addr\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c2f7d20f6346..08c74e65836b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -811,7 +811,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -861,7 +861,7 @@ err_free_spt:
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
unsigned long gfn, bool guest_pde_ips)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -936,7 +936,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- intel_gvt_gtt_type_t cur_pt_type;
+ enum intel_gvt_gtt_type cur_pt_type;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
@@ -1076,6 +1076,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
} else {
int type = get_next_pt_type(we->type);
+ if (!gtt_type_is_pt(type))
+ goto err;
+
spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
@@ -1855,7 +1858,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
* Zero on success, negative error code in pointer if failed.
*/
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_mm *mm;
@@ -2309,7 +2312,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t type)
+ enum intel_gvt_gtt_type type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2594,7 +2597,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
* Zero on success, negative error code if failed.
*/
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_vgpu_mm *mm;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 32c573aea494..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
unsigned long scratch_mfn;
};
-typedef enum {
- GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+ GTT_TYPE_INVALID = 0,
GTT_TYPE_GGTT_PTE,
@@ -124,7 +124,7 @@ typedef enum {
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
enum intel_gvt_mm_type {
INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
union {
struct {
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
/*
* The 4 PDPs in ring context. For 48bit addressing,
* only PDP0 is valid and points to PML4. For 32bit
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
};
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
struct intel_vgpu *vgpu;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
} shadow_page;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
u64 pdps[]);
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 18f01eeb2510..90673fca792f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1206,7 +1206,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
struct intel_vgpu_mm *mm;
u64 *pdps;
@@ -3303,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index e7e14c842be4..edf6d646eb25 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 3de5b643b266..33aaa14bfdde 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -126,7 +126,4 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
-/* define the effective range of MCHBAR register on Sandybridge+ */
-#define MCHBAR_MIRROR_REG_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
-
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8998fa5ab198..7c99bbc3e2b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1343,7 +1343,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
u64 pdps[GVT_RING_CTX_NR_PDPS];
switch (desc->addressing_mode) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 215bf3fef10c..8079ea3af103 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
while (it) {
struct drm_i915_gem_object *obj;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b836721d3b13..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -425,6 +425,26 @@ void __i915_request_submit(struct i915_request *request)
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
+ /*
+ * Are we using semaphores when the gpu is already saturated?
+ *
+ * Using semaphores incurs a cost in having the GPU poll a
+ * memory location, busywaiting for it to change. The continual
+ * memory reads can have a noticeable impact on the rest of the
+ * system with the extra bus traffic, stalling the cpu as it too
+ * tries to access memory across the bus (perf stat -e bus-cycles).
+ *
+ * If we installed a semaphore on this request and we only submit
+ * the request after the signaler completed, that indicates the
+ * system is overloaded and using semaphores at this time only
+ * increases the amount of work we are doing. If so, we disable
+ * further use of semaphores until we are idle again, whence we
+ * optimistically try again.
+ */
+ if (request->sched.semaphores &&
+ i915_sw_fence_signaled(&request->semaphore))
+ request->hw_context->saturated |= request->sched.semaphores;
+
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -432,6 +452,7 @@ void __i915_request_submit(struct i915_request *request)
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
@@ -799,6 +820,39 @@ err_unreserve:
}
static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+ if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ return 0;
+
+ signal = list_prev_entry(signal, ring_link);
+ if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ return 0;
+
+ return i915_sw_fence_await_dma_fence(&rq->submit,
+ &signal->fence, 0,
+ I915_FENCE_GFP);
+}
+
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+ /*
+ * Polling a semaphore causes bus traffic, delaying other users of
+ * both the GPU and CPU. We want to limit the impact on others,
+ * while taking advantage of early submission to reduce GPU
+ * latency. Therefore we restrict ourselves to not using more
+ * than one semaphore from each source, and not using a semaphore
+ * if we have detected the engine is saturated (i.e. would not be
+ * submitted early and cause bus traffic reading an already passed
+ * semaphore).
+ *
+ * See the are-we-too-late? check in __i915_request_submit().
+ */
+ return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
+static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
@@ -811,11 +865,15 @@ emit_semaphore_wait(struct i915_request *to,
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* Just emit the first semaphore we see as request space is limited. */
- if (to->sched.semaphores & from->engine->mask)
+ if (already_busywaiting(to) & from->engine->mask)
return i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
err = i915_sw_fence_await_dma_fence(&to->semaphore,
&from->fence, 0,
I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,6 +23,7 @@
*/
#include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
@@ -80,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+ return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ fence->timestamp = timestamp;
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+ struct dma_fence_cb *cur, *tmp;
+
+ lockdep_assert_held(fence->lock);
+ lockdep_assert_irqs_disabled();
+
+ list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
+ cur->func(fence, cur);
+ }
+ INIT_LIST_HEAD(&fence->cb_list);
+}
+
void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
LIST_HEAD(signal);
@@ -104,6 +135,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ if (!__dma_fence_signal(&rq->fence))
+ continue;
/*
* Queue for execution after dropping the signaling
@@ -111,14 +146,6 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * so we need to acquire our reference to the request
- * before we cancel the breadcrumb.
- */
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
@@ -141,7 +168,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
- dma_fence_signal(&rq->fence);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+ spin_lock(&rq->lock);
+ __dma_fence_signal__notify(&rq->fence);
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
}
@@ -243,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
- if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- return true;
-
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
- !__request_completed(rq)) {
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
struct intel_context *ce = rq->hw_context;
struct list_head *pos;
+ spin_lock(&b->irq_lock);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
__intel_breadcrumbs_arm_irq(b);
/*
@@ -284,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_move_tail(&ce->signal_link, &b->signalers);
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ spin_unlock(&b->irq_lock);
}
- spin_unlock(&b->irq_lock);
return !__request_completed(rq);
}
@@ -294,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
- return;
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
+ /*
+ * We must wait for b->irq_lock so that we know the interrupt handler
+ * has released its reference to the intel_context and has completed
+ * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+ * required).
+ */
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
struct intel_context *ce = rq->hw_context;
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 8931e0fee873..924cc556223a 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -230,6 +230,7 @@ intel_context_init(struct intel_context *ce,
ce->gem_context = ctx;
ce->engine = engine;
ce->ops = engine->cops;
+ ce->saturated = 0;
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 68b4ca1611e0..339c7437fe82 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "i915_active_types.h"
+#include "intel_engine_types.h"
struct i915_gem_context;
struct i915_vma;
@@ -58,6 +59,8 @@ struct intel_context {
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
/**
* active_tracker: Active tracker for the external rq activity
* on this intel_context object.
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd40a4a6739..5098228f1302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12082,6 +12082,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config,
bool adjust)
{
+ struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
bool ret = true;
bool fixup_inherited = adjust &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12303,6 +12304,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+ /*
+ * Changing the EDP transcoder input mux
+ * (A_ONOFF vs. A_ON) requires a full modeset.
+ */
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ current_config->cpu_transcoder == TRANSCODER_EDP)
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+
if (!adjust) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index c805a0966395..5679f2fffb7c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1280,6 +1280,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+ if (IS_GEMINILAKE(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 37f60cb8e9e1..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
*/
#include <linux/circ_buf.h>
-#include <trace/events/dma_fence.h>
#include "intel_guc_submission.h"
#include "intel_lrc_reg.h"
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index e94b5b1bc1b7..e7c7be4911c1 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -311,10 +311,17 @@ retry:
pipe_config->base.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
+ if (IS_HASWELL(dev_priv) &&
+ pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP) {
+ bool old_need_power_well = pipe_config->pch_pfit.enabled ||
+ pipe_config->pch_pfit.force_thru;
+ bool new_need_power_well = pipe_config->pch_pfit.enabled ||
+ enable;
+
pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
+
+ if (old_need_power_well != new_need_power_well)
pipe_config->base.connectors_changed = true;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9155dafae2a9..38e2cfa9cec7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -747,7 +747,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
* will make sure that the refcounting is correct in case we need to
* bring down the GX after a GMU failure
*/
- if (!IS_ERR(gmu->gxpd))
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_get(gmu->gxpd);
out:
@@ -863,7 +863,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
* domain. Usually the GMU does this but only if the shutdown sequence
* was successful
*/
- if (!IS_ERR(gmu->gxpd))
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_put_sync(gmu->gxpd);
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
@@ -1234,7 +1234,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
pm_runtime_disable(gmu->dev);
- if (!IS_ERR(gmu->gxpd)) {
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 018df2c3b7ed..45a5bc6ede5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_kms.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index da1f727d7495..ce1a555e1f31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -780,7 +780,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -799,8 +798,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* implicit fence and fb prepare by hand here.
*/
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
if (fence)
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f5b1256e32b6..131c23a267ee 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -49,15 +49,13 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
if (!new_state->fb)
return 0;
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index eb33d2d00d77..e20e6b429804 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,7 +33,7 @@
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/kthread.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 31d5a744d84f..35f55dd25994 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -803,7 +803,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+ vma->aspace != NULL ? vma->aspace->name : NULL,
vma->iova, vma->mapped ? "mapped" : "unmapped",
vma->inuse);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5ac781dffee..812d1b1369a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -86,10 +86,6 @@ struct msm_gem_object {
struct llist_node freed;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
/* For physically contiguous buffers. Used when we don't have
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 553c7da5e8e0..1f1395148ff0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -5,14 +5,12 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
- select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
select MXM_WMI if ACPI && X86
select POWER_SUPPLY
# Similar to i915, we need to select ACPI_VIDEO and it's dependencies
- select BACKLIGHT_LCD_SUPPORT if ACPI && X86
select BACKLIGHT_CLASS_DEVICE if ACPI && X86
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 2216c58620c2..7c41b0599d1a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
NV50_DISP_INTERLOCK__SIZE
} type;
u32 data;
+ u32 wimm;
};
void corec37d_ntfy_init(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2e7a0c347ddb..06ee23823a68 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->set.or = head->func->or != NULL;
}
- if (asyh->state.mode_changed)
+ if (asyh->state.mode_changed || asyh->state.connectors_changed)
nv50_head_atomic_check_mode(head, asyh);
if (asyh->state.color_mgmt_changed ||
@@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
asyh->ovly = armh->ovly;
asyh->dither = armh->dither;
asyh->procamp = armh->procamp;
+ asyh->or = armh->or;
asyh->dp = armh->dp;
asyh->clr.mask = 0;
asyh->set.mask = 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 9103b8494279..f7dbd965e4e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
return ret;
}
+ wndw->interlock.wimm = wndw->interlock.data;
wndw->immd = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b95181027b31..283ff690350e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -127,7 +127,7 @@ void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
struct nv50_wndw_atom *asyw)
{
- if (interlock) {
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
asyw->image.mode = 0;
asyw->image.interval = 1;
}
@@ -149,7 +149,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
if (asyw->set.point) {
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
- interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+ interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
wndw->immd->point(wndw, asyw);
wndw->immd->update(wndw, interlock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 22cd45845e07..7c2fcaba42d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -631,7 +631,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
+ true, false, 0, &device);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7971096b6767..10d91e8bbb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2540,6 +2540,41 @@ nv166_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv167_chipset = {
+ .name = "TU117",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2824,8 +2859,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
u64 mmio_base, mmio_size;
u32 boot0, strap;
void __iomem *map;
- int ret = -EEXIST;
- int i;
+ int ret = -EEXIST, i;
+ unsigned chipset;
mutex_lock(&nv_devices_mutex);
if (nvkm_device_find_locked(handle))
@@ -2870,6 +2905,26 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
strap = ioread32_native(map + 0x101000);
iounmap(map);
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
+ else
+ override_boot0 = 0x20004000;
+ }
+
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
+
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
@@ -2996,6 +3051,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
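
A quick illustration of the NvChipset override added above, pulled out as standalone C (the helper and main() are mine, not part of the patch): for NV10+ parts the 9-bit chipset id lands in boot0[28:20] with the low byte forced to 0xa1, so the decode path that follows recovers the same id.

#include <stdint.h>
#include <stdio.h>

/* mirrors the override_boot0 computation in nvkm_device_ctor() */
static uint32_t fake_boot0(unsigned int chipset)
{
        if (chipset >= 0x10)
                return ((chipset & 0x1ff) << 20) | 0x000000a1;

        return chipset != 0x04 ? 0x20104000 : 0x20004000;
}

int main(void)
{
        uint32_t boot0 = fake_boot0(0x167); /* e.g. NvChipset=0x167 (TU117) */

        /* prints boot0=167000a1 decoded=167 */
        printf("boot0=%08x decoded=%03x\n", (unsigned)boot0,
               (unsigned)((boot0 & 0x1ff00000) >> 20));
        return 0;
}
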
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 5f301e632599..818d21bd28d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
- failsafe = cfg;
+ if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ /* Try to respect sink limits too when selecting
+ * lowest link configuration.
+ */
+ if (!failsafe ||
+ (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
+ failsafe = cfg;
+ }
+
if (failsafe && cfg[1].rate < dataKBps)
break;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 970f669c6d29..3b2bced1b015 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -165,6 +165,10 @@ err_out0:
void panfrost_device_fini(struct panfrost_device *pfdev)
{
+ panfrost_job_fini(pfdev);
+ panfrost_mmu_fini(pfdev);
+ panfrost_gpu_fini(pfdev);
+ panfrost_reset_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 94b0819ad50b..d11e2281dde6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -219,7 +219,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
fail_job:
panfrost_job_put(job);
fail_out_sync:
- drm_syncobj_put(sync_out);
+ if (sync_out)
+ drm_syncobj_put(sync_out);
return ret;
}
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 0c5d391f0a8f..4501597f30ab 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
return PTR_ERR(parent);
}
+
+ spin_lock_init(&priv->tim2_lock);
+
/* If the clock divider is broken, use the parent directly */
if (priv->variant->broken_clockdivider) {
priv->clk = parent;
return 0;
}
parent_name = __clk_get_name(parent);
-
- spin_lock_init(&priv->tim2_lock);
div->init = &init;
ret = devm_clk_hw_register(drm->dev, div);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index aa898c699101..433df7036f96 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = min(max(den/post_div, 1u), ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
- *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+ *ref_div = (*ref_div * fb_div_max)/(*fb_div);
*fb_div = fb_div_max;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index b3019505065a..c9bd1278f573 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
/* TODO we should be able to split locking for interval tree and
* the tear down.
*/
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
mutex_lock(&rmn->lock);
else if (!mutex_trylock(&rmn->lock))
return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index a8db758d523e..a2ebb08990e9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -221,26 +221,13 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- unsigned int i, count = obj->size >> PAGE_SHIFT;
+ unsigned int count = obj->size >> PAGE_SHIFT;
unsigned long user_count = vma_pages(vma);
- unsigned long uaddr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff;
- unsigned long end = user_count + offset;
- int ret;
if (user_count == 0)
return -ENXIO;
- if (end > count)
- return -ENXIO;
- for (i = offset; i < end; i++) {
- ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
- if (ret)
- return ret;
- uaddr += PAGE_SIZE;
- }
-
- return 0;
+ return vm_map_pages(vma, rk_obj->pages, count);
}
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
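
For context, a rough sketch of what vm_map_pages() does internally (simplified, not the literal mm/memory.c implementation): it folds in the vma->vm_pgoff and range checks that the rockchip handler above, and the xen handler further down, used to open-code, which is why both shrink to a single call.

/* simplified view of vm_map_pages(); the sketch name is mine */
static int vm_map_pages_sketch(struct vm_area_struct *vma,
                               struct page **pages, unsigned long num)
{
        unsigned long count = vma_pages(vma);
        unsigned long off = vma->vm_pgoff;
        unsigned long uaddr = vma->vm_start;
        unsigned long i;
        int ret;

        /* reject windows that fall outside the backing page array */
        if (off >= num || count > num - off)
                return -ENXIO;

        for (i = 0; i < count; i++) {
                ret = vm_insert_page(vma, uaddr, pages[off + i]);
                if (ret < 0)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        return 0;
}
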
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 61bbe8e8bcc5..e2a6c82c8252 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -4,7 +4,6 @@ config DRM_SHMOBILE
depends on DRM && ARM
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index fb985ba1a176..2598741a00a6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -11,6 +11,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "sun4i_hdmi.h"
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 52598049c096..cb7df2086aee 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -8,7 +8,6 @@ config DRM_TILCDC
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
help
Choose this option if you have a TI SoC with LCDC display
controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 9412709067f5..2ea4e20b7b8a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -41,6 +41,7 @@
#include <linux/component.h>
#include <linux/dmaengine.h>
#include <linux/i2c.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8bf3a7c23ed3..062067438f1d 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (NULL == vsg->pages)
return -ENOMEM;
ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
- vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+ vsg->num_pages,
+ vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
if (ret != vsg->num_pages) {
if (ret < 0)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 53c376d55fcf..a24548489dde 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -224,8 +224,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
struct vm_area_struct *vma)
{
- unsigned long addr = vma->vm_start;
- int i;
+ int ret;
/*
* clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -252,18 +251,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
* FIXME: as we insert all the pages now then no .fault handler must
* be called, so don't provide one
*/
- for (i = 0; i < xen_obj->num_pages; i++) {
- int ret;
-
- ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
- if (ret < 0) {
- DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
- return ret;
- }
+ ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+ if (ret < 0)
+ DRM_ERROR("Failed to map pages into vma: %d\n", ret);
- addr += PAGE_SIZE;
- }
- return 0;
+ return ret;
}
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 46c6efea1404..abdb01879caa 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1051,6 +1051,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
case 0x28c: map_key_clear(KEY_SEND); break;
+ case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+
case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index c4dd6301e7c8..0daf0b32aa4a 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -830,10 +830,8 @@ static int aspeed_create_pwm_cooling(struct device *dev,
}
snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%pOFn%d", child, pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &aspeed_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &aspeed_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index f1bf67aca9e8..3f6e5b4e3997 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -498,6 +498,11 @@ static const struct of_device_id of_gpio_fan_match[] = {
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
+static void gpio_fan_stop(void *data)
+{
+ set_fan_speed(data, 0);
+}
+
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
@@ -532,6 +537,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
err = fan_ctrl_init(fan_data);
if (err)
return err;
+ devm_add_action_or_reset(dev, gpio_fan_stop, fan_data);
}
/* Make this driver part of hwmon class. */
@@ -543,32 +549,20 @@ static int gpio_fan_probe(struct platform_device *pdev)
return PTR_ERR(fan_data->hwmon_dev);
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(np,
- "gpio-fan",
- fan_data,
- &gpio_fan_cool_ops);
+ fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
+ "gpio-fan", fan_data, &gpio_fan_cool_ops);
dev_info(dev, "GPIO fan initialized\n");
return 0;
}
-static int gpio_fan_remove(struct platform_device *pdev)
+static void gpio_fan_shutdown(struct platform_device *pdev)
{
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
- if (!IS_ERR(fan_data->cdev))
- thermal_cooling_device_unregister(fan_data->cdev);
-
if (fan_data->gpios)
set_fan_speed(fan_data, 0);
-
- return 0;
-}
-
-static void gpio_fan_shutdown(struct platform_device *pdev)
-{
- gpio_fan_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
@@ -602,7 +596,6 @@ static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
- .remove = gpio_fan_remove,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
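
The gpio-fan change above (like the pwm-fan and mlxreg-fan conversions further down) leans on the devres action pattern: register a cleanup action right after the resource is brought up, and devres runs it in reverse order on probe failure or unbind, so the .remove callback can go away. A minimal sketch with made-up foo_* names:

/* illustrative only; foo_ctx, foo_start() and foo_set_speed() are invented */
static void foo_stop(void *data)
{
        struct foo_ctx *ctx = data;

        foo_set_speed(ctx, 0); /* same idea as gpio_fan_stop() above */
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_ctx *ctx;
        int err;

        ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        err = foo_start(ctx);
        if (err)
                return err;

        /* from here on, teardown is handled by devres */
        return devm_add_action_or_reset(&pdev->dev, foo_stop, ctx);
}
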
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index cd91510a5387..e694c46ff039 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -118,9 +118,7 @@ static DEFINE_IDA(hwmon_ida);
* The complex conditional is necessary to avoid a cyclic dependency
* between hwmon and thermal_sys modules.
*/
-#if IS_REACHABLE(CONFIG_THERMAL) && defined(CONFIG_THERMAL_OF) && \
- (!defined(CONFIG_THERMAL_HWMON) || \
- !(defined(MODULE) && IS_MODULE(CONFIG_THERMAL)))
+#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index f816d2ae1e58..ed8d59d4eecb 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -465,42 +465,42 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
static int mlxreg_fan_probe(struct platform_device *pdev)
{
struct mlxreg_core_platform_data *pdata;
+ struct device *dev = &pdev->dev;
struct mlxreg_fan *fan;
struct device *hwm;
int err;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "Failed to get platform data.\n");
+ dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
- fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ fan = devm_kzalloc(dev, sizeof(*fan), GFP_KERNEL);
if (!fan)
return -ENOMEM;
- fan->dev = &pdev->dev;
+ fan->dev = dev;
fan->regmap = pdata->regmap;
- platform_set_drvdata(pdev, fan);
err = mlxreg_fan_config(fan, pdata);
if (err)
return err;
- hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ hwm = devm_hwmon_device_register_with_info(dev, "mlxreg_fan",
fan,
&mlxreg_fan_hwmon_chip_info,
NULL);
if (IS_ERR(hwm)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ dev_err(dev, "Failed to register hwmon device\n");
return PTR_ERR(hwm);
}
if (IS_REACHABLE(CONFIG_THERMAL)) {
- fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
- &mlxreg_fan_cooling_ops);
+ fan->cdev = devm_thermal_of_cooling_device_register(dev,
+ NULL, "mlxreg_fan", fan, &mlxreg_fan_cooling_ops);
if (IS_ERR(fan->cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
+ dev_err(dev, "Failed to register cooling device\n");
return PTR_ERR(fan->cdev);
}
}
@@ -508,22 +508,11 @@ static int mlxreg_fan_probe(struct platform_device *pdev)
return 0;
}
-static int mlxreg_fan_remove(struct platform_device *pdev)
-{
- struct mlxreg_fan *fan = platform_get_drvdata(pdev);
-
- if (IS_REACHABLE(CONFIG_THERMAL))
- thermal_cooling_device_unregister(fan->cdev);
-
- return 0;
-}
-
static struct platform_driver mlxreg_fan_driver = {
.driver = {
.name = "mlxreg-fan",
},
.probe = mlxreg_fan_probe,
- .remove = mlxreg_fan_remove,
};
module_platform_driver(mlxreg_fan_driver);
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 1dc0cd452498..09aaefa6fdb8 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -846,10 +846,8 @@ static int npcm7xx_create_pwm_cooling(struct device *dev,
snprintf(cdev->name, THERMAL_NAME_LENGTH, "%pOFn%d", child,
pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &npcm7xx_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &npcm7xx_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index eead8afe6447..5fb2745f0226 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -273,27 +273,40 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
return 0;
}
+static void pwm_fan_regulator_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void pwm_fan_pwm_disable(void *__ctx)
+{
+ struct pwm_fan_ctx *ctx = __ctx;
+ pwm_disable(ctx->pwm);
+ del_timer_sync(&ctx->rpm_timer);
+}
+
static int pwm_fan_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
struct pwm_fan_ctx *ctx;
struct device *hwmon;
int ret;
struct pwm_state state = { };
u32 ppr = 2;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_init(&ctx->lock);
- ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
+ ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
if (IS_ERR(ctx->pwm)) {
ret = PTR_ERR(ctx->pwm);
if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
+ dev_err(dev, "Could not get PWM: %d\n", ret);
return ret;
}
@@ -304,7 +317,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (ctx->irq == -EPROBE_DEFER)
return ctx->irq;
- ctx->reg_en = devm_regulator_get_optional(&pdev->dev, "fan");
+ ctx->reg_en = devm_regulator_get_optional(dev, "fan");
if (IS_ERR(ctx->reg_en)) {
if (PTR_ERR(ctx->reg_en) != -ENODEV)
return PTR_ERR(ctx->reg_en);
@@ -313,10 +326,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
} else {
ret = regulator_enable(ctx->reg_en);
if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable fan supply: %d\n", ret);
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
return ret;
}
+ devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
+ ctx->reg_en);
}
ctx->pwm_value = MAX_PWM;
@@ -328,91 +342,57 @@ static int pwm_fan_probe(struct platform_device *pdev)
ret = pwm_apply_state(ctx->pwm, &state);
if (ret) {
- dev_err(&pdev->dev, "Failed to configure PWM: %d\n", ret);
- goto err_reg_disable;
+ dev_err(dev, "Failed to configure PWM: %d\n", ret);
+ return ret;
}
-
timer_setup(&ctx->rpm_timer, sample_timer, 0);
+ devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
- of_property_read_u32(pdev->dev.of_node, "pulses-per-revolution", &ppr);
+ of_property_read_u32(dev->of_node, "pulses-per-revolution", &ppr);
ctx->pulses_per_revolution = ppr;
if (!ctx->pulses_per_revolution) {
- dev_err(&pdev->dev, "pulses-per-revolution can't be zero.\n");
- ret = -EINVAL;
- goto err_pwm_disable;
+ dev_err(dev, "pulses-per-revolution can't be zero.\n");
+ return -EINVAL;
}
if (ctx->irq > 0) {
- ret = devm_request_irq(&pdev->dev, ctx->irq, pulse_handler, 0,
+ ret = devm_request_irq(dev, ctx->irq, pulse_handler, 0,
pdev->name, ctx);
if (ret) {
- dev_err(&pdev->dev,
- "Failed to request interrupt: %d\n", ret);
- goto err_pwm_disable;
+ dev_err(dev, "Failed to request interrupt: %d\n", ret);
+ return ret;
}
ctx->sample_start = ktime_get();
mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
- hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
+ hwmon = devm_hwmon_device_register_with_groups(dev, "pwmfan",
ctx, pwm_fan_groups);
if (IS_ERR(hwmon)) {
- ret = PTR_ERR(hwmon);
- dev_err(&pdev->dev,
- "Failed to register hwmon device: %d\n", ret);
- goto err_del_timer;
+ dev_err(dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwmon);
}
- ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
+ ret = pwm_fan_of_get_cooling_data(dev, ctx);
if (ret)
- goto err_del_timer;
+ return ret;
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
- cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
- "pwm-fan", ctx,
- &pwm_fan_cooling_ops);
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ dev->of_node, "pwm-fan", ctx, &pwm_fan_cooling_ops);
if (IS_ERR(cdev)) {
ret = PTR_ERR(cdev);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Failed to register pwm-fan as cooling device: %d\n",
ret);
- goto err_del_timer;
+ return ret;
}
ctx->cdev = cdev;
thermal_cdev_update(cdev);
}
return 0;
-
-err_del_timer:
- del_timer_sync(&ctx->rpm_timer);
-
-err_pwm_disable:
- state.enabled = false;
- pwm_apply_state(ctx->pwm, &state);
-
-err_reg_disable:
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return ret;
-}
-
-static int pwm_fan_remove(struct platform_device *pdev)
-{
- struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
-
- thermal_cooling_device_unregister(ctx->cdev);
- del_timer_sync(&ctx->rpm_timer);
-
- if (ctx->pwm_value)
- pwm_disable(ctx->pwm);
-
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -480,7 +460,6 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
- .remove = pwm_fan_remove,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 06ca3f7fcc44..4a5eff3f18bc 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -733,11 +733,11 @@ static int iio_channel_read_avail(struct iio_channel *chan,
vals, type, length, info);
}
-int iio_read_avail_channel_raw(struct iio_channel *chan,
- const int **vals, int *length)
+int iio_read_avail_channel_attribute(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum attribute)
{
int ret;
- int type;
mutex_lock(&chan->indio_dev->info_exist_lock);
if (!chan->indio_dev->info) {
@@ -745,11 +745,23 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
goto err_unlock;
}
- ret = iio_channel_read_avail(chan,
- vals, &type, length, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length)
+{
+ int ret;
+ int type;
+
+ ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
+ IIO_CHAN_INFO_RAW);
+
if (ret >= 0 && type != IIO_VAL_INT)
/* raw values are assumed to be IIO_VAL_INT */
ret = -EINVAL;
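
A hypothetical consumer-side use of the newly exported iio_read_avail_channel_attribute() (function body, names and the choice of IIO_CHAN_INFO_SCALE are illustrative only), showing why the raw-only helper was generalised:

/* sketch: list the available scale values of a consumer channel */
static int show_available_scales(struct iio_channel *chan)
{
        const int *vals;
        int type, length, ret;

        ret = iio_read_avail_channel_attribute(chan, &vals, &type, &length,
                                               IIO_CHAN_INFO_SCALE);
        if (ret < 0)
                return ret;

        /* 'length' integers in 'vals', to be decoded according to 'type' */
        return 0;
}
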
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index ba01b90c04e7..2f7d14159841 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -731,8 +731,8 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
if (rec->roce.route_resolved)
return 0;
- rdma_gid2ip(&sgid._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid._sockaddr, &rec->dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);
if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
return -EINVAL;
@@ -743,7 +743,7 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
dev_addr.net = &init_net;
dev_addr.sgid_attr = attr;
- ret = addr_resolve(&sgid._sockaddr, &dgid._sockaddr,
+ ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
&dev_addr, false, true, 0);
if (ret)
return ret;
@@ -815,22 +815,22 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
int ret;
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
dev_addr.net = &init_net;
dev_addr.sgid_attr = sgid_attr;
init_completion(&ctx.comp);
- ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, true, &ctx);
+ ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
+ (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
+ resolve_cb, true, &ctx);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 98eadd3089ce..69188cbbd99b 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1347,32 +1347,35 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
-static int nldev_get_sys_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
- struct nlmsghdr *nlh;
+ struct sk_buff *msg;
int err;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err)
return err;
- nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
RDMA_NLDEV_CMD_SYS_GET),
0, 0);
- err = nla_put_u8(skb, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
(u8)ib_devices_shared_netns);
if (err) {
- nlmsg_cancel(skb, nlh);
+ nlmsg_free(msg);
return err;
}
-
- nlmsg_end(skb, nlh);
- return skb->len;
+ nlmsg_end(msg, nlh);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
}
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1442,7 +1445,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.dump = nldev_res_get_pd_dumpit,
},
[RDMA_NLDEV_CMD_SYS_GET] = {
- .dump = nldev_get_sys_get_dumpit,
+ .doit = nldev_sys_get_doit,
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 0a23048db523..e7ea819fcb11 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -295,10 +295,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
while (npages) {
down_read(&mm->mmap_sem);
- ret = get_user_pages_longterm(cur_base,
+ ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- gup_flags, page_list, NULL);
+ gup_flags | FOLL_LONGTERM,
+ page_list, NULL);
if (ret < 0) {
up_read(&mm->mmap_sem);
goto umem_release;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c7226cf52acc..f962b5bbfa40 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -152,7 +152,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct ib_ucontext_per_mm *per_mm =
container_of(mn, struct ib_ucontext_per_mm, mn);
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
down_read(&per_mm->umem_rwsem);
else if (!down_read_trylock(&per_mm->umem_rwsem))
return -EAGAIN;
@@ -170,7 +170,8 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
range->end,
invalidate_range_start_trampoline,
- range->blockable, NULL);
+ mmu_notifier_range_blockable(range),
+ NULL);
}
static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 24b592c6522e..02eee8eff1db 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -104,8 +104,9 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
bool writable, struct page **pages)
{
int ret;
+ unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
- ret = get_user_pages_fast(vaddr, npages, writable, pages);
+ ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
if (ret < 0)
return ret;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 169ffffcf5ed..80b42d069328 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -154,7 +154,7 @@ bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
* must be considered upon checking for a valid object id.
* For that the opcode of the creator command is encoded as part of the obj_id.
*/
-static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
+static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
return ((u64)opcode << 32) | obj_id;
}
@@ -167,7 +167,9 @@ static u64 devx_get_obj_id(const void *in)
switch (opcode) {
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
- obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_type) << 16,
MLX5_GET(general_obj_in_cmd_hdr, in,
obj_id));
break;
@@ -1171,6 +1173,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
+ u16 obj_type = 0;
int err;
int uid;
u32 obj_id;
@@ -1230,7 +1233,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (err)
goto err_copy;
- obj->obj_id = get_enc_obj_id(opcode, obj_id);
+ if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
+ obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
+
+ obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
+
return 0;
err_copy:
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 112d2f38e0de..8ff0e90d7564 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
+ ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+ FOLL_WRITE | FOLL_LONGTERM, pages);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 1d4ea135c28f..8d3e36d548aa 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -83,7 +83,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct iphdr ipv4;
const struct ib_global_route *ib_grh;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -133,9 +132,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
ipv4.tot_len = htons(0);
ipv4.ttl = ib_grh->hop_limit;
ipv4.protocol = nxthdr;
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
- rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &ib_grh->dgid);
ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
} else {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 32674b291f60..5127e2ea4bdd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2499,7 +2499,6 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
u16 vlan_id = 0xFFFF;
u8 mac_addr[6], hdr_type;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -2542,8 +2541,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
hdr_type = rdma_gid_attr_network_type(sgid_attr);
if (hdr_type == RDMA_NETWORK_IPV4) {
- rdma_gid2ip(&sgid_addr._sockaddr, &sgid_attr->gid);
- rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
memcpy(&cmd->params.dgid[0],
&dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
memcpy(&cmd->params.sgid[0],
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 123ca8f64f75..f712fb7fa82f 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -114,10 +114,10 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_read(&current->mm->mmap_sem);
for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
- num_pages - got,
- FOLL_WRITE | FOLL_FORCE,
- p + got, NULL);
+ ret = get_user_pages(start_page + got * PAGE_SIZE,
+ num_pages - got,
+ FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
+ p + got, NULL);
if (ret < 0) {
up_read(&current->mm->mmap_sem);
goto bail_release;
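
The get_user_pages conversions in this section all follow the same shape: the dedicated get_user_pages_longterm() entry point and the old boolean "write" argument of get_user_pages_fast() are replaced by explicit FOLL_LONGTERM (plus FOLL_WRITE) flags, letting the core apply its long-term-pin policies, such as refusing filesystem-DAX mappings. A hypothetical wrapper showing the new calling convention:

/* illustrative only; not a helper introduced by this series */
static int pin_user_buffer(unsigned long vaddr, int npages, bool writable,
                           struct page **pages)
{
        unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);

        return get_user_pages_fast(vaddr, npages, gup_flags, pages);
}
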
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index ef19d39a44b1..0c204776263f 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
else
j = npages;
- ret = get_user_pages_fast(addr, j, 0, pages);
+ ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
if (ret != j) {
i = 0;
j = ret;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index da35d6fdfc5e..e312f522a66d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -143,10 +143,11 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages_longterm(cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof(struct page *)),
- gup_flags, page_list, NULL);
+ ret = get_user_pages(cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof(struct page *)),
+ gup_flags | FOLL_LONGTERM,
+ page_list, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f040d8881ff2..d1e25aba8212 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -503,14 +503,13 @@ static int evdev_open(struct inode *inode, struct file *file)
{
struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
- unsigned int size = sizeof(struct evdev_client) +
- bufsize * sizeof(struct input_event);
struct evdev_client *client;
int error;
- client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ client = kzalloc(struct_size(client, buffer, bufsize),
+ GFP_KERNEL | __GFP_NOWARN);
if (!client)
- client = vzalloc(size);
+ client = vzalloc(struct_size(client, buffer, bufsize));
if (!client)
return -ENOMEM;
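
The evdev change relies on struct_size() from <linux/overflow.h>, which evaluates to sizeof(*p) plus count trailing flexible-array elements and saturates rather than wrapping on overflow. A standalone userspace illustration with a stand-in struct (not the real struct evdev_client):

#include <stddef.h>
#include <stdio.h>

struct client_sketch {
        unsigned int head, tail;
        struct { unsigned short type, code; int value; } buffer[];
};

int main(void)
{
        size_t bufsize = 64;
        /* roughly what struct_size(client, buffer, bufsize) expands to */
        size_t bytes = sizeof(struct client_sketch) +
                       bufsize * sizeof(((struct client_sketch *)0)->buffer[0]);

        printf("%zu bytes\n", bytes);
        return 0;
}
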
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 52d7f55fca32..82398827b64f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -137,6 +137,17 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
right-hand column will be interpreted as the key shown in the
left-hand column.
+config KEYBOARD_QT1050
+ tristate "Microchip AT42QT1050 Touch Sensor Chip"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to use the Microchip AT42QT1050 QTouch
+ Sensor chip as an input device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called qt1050.
+
config KEYBOARD_QT1070
tristate "Atmel AT42QT1070 Touch Sensor Chip"
depends on I2C
@@ -194,7 +205,7 @@ config KEYBOARD_LKKBD
config KEYBOARD_EP93XX
tristate "EP93xx Matrix Keypad support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus EP93XX.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 182e92985dbf..f0291ca39f62 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
+obj-$(CONFIG_KEYBOARD_QT1050) += qt1050.o
obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SAMSUNG) += samsung-keypad.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 850bb259c20e..3ad93e3e2f4c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -401,6 +401,8 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
if (ps2_handle_response(&atkbd->ps2dev, data))
goto out;
+ pm_wakeup_event(&serio->dev, 0);
+
if (!atkbd->enabled)
goto out;
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index f77b295e0123..575dac52f7b4 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -27,8 +27,7 @@
#include <linux/io.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/cirrus/ep93xx.h>
#include <linux/platform_data/keypad-ep93xx.h>
/*
@@ -137,10 +136,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- if (pdata->flags & EP93XX_KEYPAD_KDIV)
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
- else
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
+ clk_set_rate(keypad->clk, pdata->clk_rate);
if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
val |= KEY_INIT_DIS3KY;
diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
new file mode 100644
index 000000000000..403060d05c3b
--- /dev/null
+++ b/drivers/input/keyboard/qt1050.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AT42QT1050 QTouch Sensor Controller
+ *
+ * Copyright (C) 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ *
+ * Based on the AT42QT1070 driver by:
+ * Bo Shen <voice.shen@atmel.com>
+ * Copyright (C) 2011 Atmel
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+/* Chip ID */
+#define QT1050_CHIP_ID 0x00
+#define QT1050_CHIP_ID_VER 0x46
+
+/* Firmware version */
+#define QT1050_FW_VERSION 0x01
+
+/* Detection status */
+#define QT1050_DET_STATUS 0x02
+
+/* Key status */
+#define QT1050_KEY_STATUS 0x03
+
+/* Key Signals */
+#define QT1050_KEY_SIGNAL_0_MSB 0x06
+#define QT1050_KEY_SIGNAL_0_LSB 0x07
+#define QT1050_KEY_SIGNAL_1_MSB 0x08
+#define QT1050_KEY_SIGNAL_1_LSB 0x09
+#define QT1050_KEY_SIGNAL_2_MSB 0x0c
+#define QT1050_KEY_SIGNAL_2_LSB 0x0d
+#define QT1050_KEY_SIGNAL_3_MSB 0x0e
+#define QT1050_KEY_SIGNAL_3_LSB 0x0f
+#define QT1050_KEY_SIGNAL_4_MSB 0x10
+#define QT1050_KEY_SIGNAL_4_LSB 0x11
+
+/* Reference data */
+#define QT1050_REF_DATA_0_MSB 0x14
+#define QT1050_REF_DATA_0_LSB 0x15
+#define QT1050_REF_DATA_1_MSB 0x16
+#define QT1050_REF_DATA_1_LSB 0x17
+#define QT1050_REF_DATA_2_MSB 0x1a
+#define QT1050_REF_DATA_2_LSB 0x1b
+#define QT1050_REF_DATA_3_MSB 0x1c
+#define QT1050_REF_DATA_3_LSB 0x1d
+#define QT1050_REF_DATA_4_MSB 0x1e
+#define QT1050_REF_DATA_4_LSB 0x1f
+
+/* Negative threshold level */
+#define QT1050_NTHR_0 0x21
+#define QT1050_NTHR_1 0x22
+#define QT1050_NTHR_2 0x24
+#define QT1050_NTHR_3 0x25
+#define QT1050_NTHR_4 0x26
+
+/* Pulse / Scale */
+#define QT1050_PULSE_SCALE_0 0x28
+#define QT1050_PULSE_SCALE_1 0x29
+#define QT1050_PULSE_SCALE_2 0x2b
+#define QT1050_PULSE_SCALE_3 0x2c
+#define QT1050_PULSE_SCALE_4 0x2d
+
+/* Detection integrator counter / AKS */
+#define QT1050_DI_AKS_0 0x2f
+#define QT1050_DI_AKS_1 0x30
+#define QT1050_DI_AKS_2 0x32
+#define QT1050_DI_AKS_3 0x33
+#define QT1050_DI_AKS_4 0x34
+
+/* Charge Share Delay */
+#define QT1050_CSD_0 0x36
+#define QT1050_CSD_1 0x37
+#define QT1050_CSD_2 0x39
+#define QT1050_CSD_3 0x3a
+#define QT1050_CSD_4 0x3b
+
+/* Low Power Mode */
+#define QT1050_LPMODE 0x3d
+
+/* Calibration and Reset */
+#define QT1050_RES_CAL 0x3f
+#define QT1050_RES_CAL_RESET BIT(7)
+#define QT1050_RES_CAL_CALIBRATE BIT(1)
+
+#define QT1050_MAX_KEYS 5
+#define QT1050_RESET_TIME 255
+
+struct qt1050_key_regs {
+ unsigned int nthr;
+ unsigned int pulse_scale;
+ unsigned int di_aks;
+ unsigned int csd;
+};
+
+struct qt1050_key {
+ u32 num;
+ u32 charge_delay;
+ u32 thr_cnt;
+ u32 samples;
+ u32 scale;
+ u32 keycode;
+};
+
+struct qt1050_priv {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct regmap *regmap;
+ struct qt1050_key keys[QT1050_MAX_KEYS];
+ unsigned short keycodes[QT1050_MAX_KEYS];
+ u8 reg_keys;
+ u8 last_keys;
+};
+
+static const struct qt1050_key_regs qt1050_key_regs_data[] = {
+ {
+ .nthr = QT1050_NTHR_0,
+ .pulse_scale = QT1050_PULSE_SCALE_0,
+ .di_aks = QT1050_DI_AKS_0,
+ .csd = QT1050_CSD_0,
+ }, {
+ .nthr = QT1050_NTHR_1,
+ .pulse_scale = QT1050_PULSE_SCALE_1,
+ .di_aks = QT1050_DI_AKS_1,
+ .csd = QT1050_CSD_1,
+ }, {
+ .nthr = QT1050_NTHR_2,
+ .pulse_scale = QT1050_PULSE_SCALE_2,
+ .di_aks = QT1050_DI_AKS_2,
+ .csd = QT1050_CSD_2,
+ }, {
+ .nthr = QT1050_NTHR_3,
+ .pulse_scale = QT1050_PULSE_SCALE_3,
+ .di_aks = QT1050_DI_AKS_3,
+ .csd = QT1050_CSD_3,
+ }, {
+ .nthr = QT1050_NTHR_4,
+ .pulse_scale = QT1050_PULSE_SCALE_4,
+ .di_aks = QT1050_DI_AKS_4,
+ .csd = QT1050_CSD_4,
+ }
+};
+
+static bool qt1050_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case QT1050_DET_STATUS:
+ case QT1050_KEY_STATUS:
+ case QT1050_KEY_SIGNAL_0_MSB:
+ case QT1050_KEY_SIGNAL_0_LSB:
+ case QT1050_KEY_SIGNAL_1_MSB:
+ case QT1050_KEY_SIGNAL_1_LSB:
+ case QT1050_KEY_SIGNAL_2_MSB:
+ case QT1050_KEY_SIGNAL_2_LSB:
+ case QT1050_KEY_SIGNAL_3_MSB:
+ case QT1050_KEY_SIGNAL_3_LSB:
+ case QT1050_KEY_SIGNAL_4_MSB:
+ case QT1050_KEY_SIGNAL_4_LSB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_range qt1050_readable_ranges[] = {
+ regmap_reg_range(QT1050_CHIP_ID, QT1050_KEY_STATUS),
+ regmap_reg_range(QT1050_KEY_SIGNAL_0_MSB, QT1050_KEY_SIGNAL_1_LSB),
+ regmap_reg_range(QT1050_KEY_SIGNAL_2_MSB, QT1050_KEY_SIGNAL_4_LSB),
+ regmap_reg_range(QT1050_REF_DATA_0_MSB, QT1050_REF_DATA_1_LSB),
+ regmap_reg_range(QT1050_REF_DATA_2_MSB, QT1050_REF_DATA_4_LSB),
+ regmap_reg_range(QT1050_NTHR_0, QT1050_NTHR_1),
+ regmap_reg_range(QT1050_NTHR_2, QT1050_NTHR_4),
+ regmap_reg_range(QT1050_PULSE_SCALE_0, QT1050_PULSE_SCALE_1),
+ regmap_reg_range(QT1050_PULSE_SCALE_2, QT1050_PULSE_SCALE_4),
+ regmap_reg_range(QT1050_DI_AKS_0, QT1050_DI_AKS_1),
+ regmap_reg_range(QT1050_DI_AKS_2, QT1050_DI_AKS_4),
+ regmap_reg_range(QT1050_CSD_0, QT1050_CSD_1),
+ regmap_reg_range(QT1050_CSD_2, QT1050_RES_CAL),
+};
+
+static const struct regmap_access_table qt1050_readable_table = {
+ .yes_ranges = qt1050_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qt1050_readable_ranges),
+};
+
+static const struct regmap_range qt1050_writeable_ranges[] = {
+ regmap_reg_range(QT1050_NTHR_0, QT1050_NTHR_1),
+ regmap_reg_range(QT1050_NTHR_2, QT1050_NTHR_4),
+ regmap_reg_range(QT1050_PULSE_SCALE_0, QT1050_PULSE_SCALE_1),
+ regmap_reg_range(QT1050_PULSE_SCALE_2, QT1050_PULSE_SCALE_4),
+ regmap_reg_range(QT1050_DI_AKS_0, QT1050_DI_AKS_1),
+ regmap_reg_range(QT1050_DI_AKS_2, QT1050_DI_AKS_4),
+ regmap_reg_range(QT1050_CSD_0, QT1050_CSD_1),
+ regmap_reg_range(QT1050_CSD_2, QT1050_RES_CAL),
+};
+
+static const struct regmap_access_table qt1050_writeable_table = {
+ .yes_ranges = qt1050_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qt1050_writeable_ranges),
+};
+
+static struct regmap_config qt1050_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = QT1050_RES_CAL,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &qt1050_writeable_table,
+ .rd_table = &qt1050_readable_table,
+ .volatile_reg = qt1050_volatile_reg,
+};
+
+static bool qt1050_identify(struct qt1050_priv *ts)
+{
+ unsigned int val;
+ int err;
+
+ /* Read Chip ID */
+ err = regmap_read(ts->regmap, QT1050_CHIP_ID, &val);
+ if (err) {
+ dev_err(&ts->client->dev, "could not read the chip ID\n");
+ return false;
+ }
+
+ if (val != QT1050_CHIP_ID_VER) {
+ dev_err(&ts->client->dev, "ID %d not supported\n", val);
+ return false;
+ }
+
+ /* Read firmware version */
+ err = regmap_read(ts->regmap, QT1050_FW_VERSION, &val);
+ if (err) {
+ dev_err(&ts->client->dev, "could not read the firmware version\n");
+ return false;
+ }
+
+ dev_info(&ts->client->dev, "AT42QT1050 firmware version %1d.%1d\n",
+ val >> 4, val & 0xf);
+
+ return true;
+}
+
+static irqreturn_t qt1050_irq_threaded(int irq, void *dev_id)
+{
+ struct qt1050_priv *ts = dev_id;
+ struct input_dev *input = ts->input;
+ unsigned long new_keys, changed;
+ unsigned int val;
+ int i, err;
+
+ /* Read the detected status register, thus clearing interrupt */
+ err = regmap_read(ts->regmap, QT1050_DET_STATUS, &val);
+ if (err) {
+ dev_err(&ts->client->dev, "Failed to read detection status: %d\n",
+ err);
+ return IRQ_NONE;
+ }
+
+ /* Read which key changed, keys are not continuous */
+ err = regmap_read(ts->regmap, QT1050_KEY_STATUS, &val);
+ if (err) {
+ dev_err(&ts->client->dev,
+ "Failed to determine the key status: %d\n", err);
+ return IRQ_NONE;
+ }
+ new_keys = (val & 0x70) >> 2 | (val & 0x6) >> 1;
+ changed = ts->last_keys ^ new_keys;
+ /* Report registered keys only */
+ changed &= ts->reg_keys;
+
+ for_each_set_bit(i, &changed, QT1050_MAX_KEYS)
+ input_report_key(input, ts->keys[i].keycode,
+ test_bit(i, &new_keys));
+
+ ts->last_keys = new_keys;
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static const struct qt1050_key_regs *qt1050_get_key_regs(int key_num)
+{
+ return &qt1050_key_regs_data[key_num];
+}
+
+static int qt1050_set_key(struct regmap *map, int number, int on)
+{
+ const struct qt1050_key_regs *key_regs;
+
+ key_regs = qt1050_get_key_regs(number);
+
+ return regmap_update_bits(map, key_regs->di_aks, 0xfc,
+ on ? BIT(4) : 0x00);
+}
+
+static int qt1050_apply_fw_data(struct qt1050_priv *ts)
+{
+ struct regmap *map = ts->regmap;
+ struct qt1050_key *button = &ts->keys[0];
+ const struct qt1050_key_regs *key_regs;
+ int i, err;
+
+ /* Disable all keys and enable only the specified ones */
+ for (i = 0; i < QT1050_MAX_KEYS; i++) {
+ err = qt1050_set_key(map, i, 0);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < QT1050_MAX_KEYS; i++, button++) {
+ /* Keep KEY_RESERVED keys off */
+ if (button->keycode == KEY_RESERVED)
+ continue;
+
+ err = qt1050_set_key(map, button->num, 1);
+ if (err)
+ return err;
+
+ key_regs = qt1050_get_key_regs(button->num);
+
+ err = regmap_write(map, key_regs->pulse_scale,
+ (button->samples << 4) | (button->scale));
+ if (err)
+ return err;
+ err = regmap_write(map, key_regs->csd, button->charge_delay);
+ if (err)
+ return err;
+ err = regmap_write(map, key_regs->nthr, button->thr_cnt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int qt1050_parse_fw(struct qt1050_priv *ts)
+{
+ struct device *dev = &ts->client->dev;
+ struct fwnode_handle *child;
+ int nbuttons;
+
+ nbuttons = device_get_child_node_count(dev);
+ if (nbuttons == 0 || nbuttons > QT1050_MAX_KEYS)
+ return -ENODEV;
+
+ device_for_each_child_node(dev, child) {
+ struct qt1050_key button;
+
+ /* Required properties */
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button.keycode)) {
+ dev_err(dev, "Button without keycode\n");
+ goto err;
+ }
+ if (button.keycode >= KEY_MAX) {
+ dev_err(dev, "Invalid keycode 0x%x\n",
+ button.keycode);
+ goto err;
+ }
+
+ if (fwnode_property_read_u32(child, "reg",
+ &button.num)) {
+ dev_err(dev, "Button without pad number\n");
+ goto err;
+ }
+ if (button.num < 0 || button.num > QT1050_MAX_KEYS - 1)
+ goto err;
+
+ ts->reg_keys |= BIT(button.num);
+
+ /* Optional properties */
+ if (fwnode_property_read_u32(child,
+ "microchip,pre-charge-time-ns",
+ &button.charge_delay)) {
+ button.charge_delay = 0;
+ } else {
+ if (button.charge_delay % 2500 == 0)
+ button.charge_delay =
+ button.charge_delay / 2500;
+ else
+ button.charge_delay = 0;
+ }
+
+ if (fwnode_property_read_u32(child, "microchip,average-samples",
+ &button.samples)) {
+ button.samples = 0;
+ } else {
+ if (is_power_of_2(button.samples))
+ button.samples = ilog2(button.samples);
+ else
+ button.samples = 0;
+ }
+
+ if (fwnode_property_read_u32(child, "microchip,average-scaling",
+ &button.scale)) {
+ button.scale = 0;
+ } else {
+ if (is_power_of_2(button.scale))
+ button.scale = ilog2(button.scale);
+ else
+ button.scale = 0;
+
+ }
+
+ if (fwnode_property_read_u32(child, "microchip,threshold",
+ &button.thr_cnt)) {
+ button.thr_cnt = 20;
+ } else {
+ if (button.thr_cnt > 255)
+ button.thr_cnt = 20;
+ }
+
+ ts->keys[button.num] = button;
+ }
+
+ return 0;
+
+err:
+ fwnode_handle_put(child);
+ return -EINVAL;
+}
+
+static int qt1050_probe(struct i2c_client *client)
+{
+ struct qt1050_priv *ts;
+ struct input_dev *input;
+ struct device *dev = &client->dev;
+ struct regmap *map;
+ unsigned int status, i;
+ int err;
+
+ /* Check basic functionality */
+ err = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE);
+ if (!err) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "assign an IRQ line to this device\n");
+ return -EINVAL;
+ }
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ map = devm_regmap_init_i2c(client, &qt1050_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ ts->client = client;
+ ts->input = input;
+ ts->regmap = map;
+
+ i2c_set_clientdata(client, ts);
+
+ /* Identify the qt1050 chip */
+ if (!qt1050_identify(ts))
+ return -ENODEV;
+
+ /* Get pdata */
+ err = qt1050_parse_fw(ts);
+ if (err) {
+ dev_err(dev, "Failed to parse firmware: %d\n", err);
+ return err;
+ }
+
+ input->name = "AT42QT1050 QTouch Sensor";
+ input->dev.parent = &client->dev;
+ input->id.bustype = BUS_I2C;
+
+ /* Add the keycode */
+ input->keycode = ts->keycodes;
+ input->keycodesize = sizeof(ts->keycodes[0]);
+ input->keycodemax = QT1050_MAX_KEYS;
+
+ __set_bit(EV_KEY, input->evbit);
+ for (i = 0; i < QT1050_MAX_KEYS; i++) {
+ ts->keycodes[i] = ts->keys[i].keycode;
+ __set_bit(ts->keycodes[i], input->keybit);
+ }
+
+ /* Trigger re-calibration */
+ err = regmap_update_bits(ts->regmap, QT1050_RES_CAL, 0x7f,
+ QT1050_RES_CAL_CALIBRATE);
+ if (err) {
+ dev_err(dev, "Trigger calibration failed: %d\n", err);
+ return err;
+ }
+ err = regmap_read_poll_timeout(ts->regmap, QT1050_DET_STATUS, status,
+ status >> 7 == 1, 10000, 200000);
+ if (err) {
+ dev_err(dev, "Calibration failed: %d\n", err);
+ return err;
+ }
+
+ /* Soft reset to set defaults */
+ err = regmap_update_bits(ts->regmap, QT1050_RES_CAL,
+ QT1050_RES_CAL_RESET, QT1050_RES_CAL_RESET);
+ if (err) {
+ dev_err(dev, "Trigger soft reset failed: %d\n", err);
+ return err;
+ }
+ msleep(QT1050_RESET_TIME);
+
+ /* Set pdata */
+ err = qt1050_apply_fw_data(ts);
+ if (err) {
+ dev_err(dev, "Failed to set firmware data: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_threaded_irq(dev, client->irq, NULL,
+ qt1050_irq_threaded, IRQF_ONESHOT,
+ "qt1050", ts);
+ if (err) {
+ dev_err(&client->dev, "Failed to request irq: %d\n", err);
+ return err;
+ }
+
+ /* Clear #CHANGE line */
+ err = regmap_read(ts->regmap, QT1050_DET_STATUS, &status);
+ if (err) {
+ dev_err(dev, "Failed to clear #CHANGE line level: %d\n", err);
+ return err;
+ }
+
+ /* Register the input device */
+ err = input_register_device(ts->input);
+ if (err) {
+ dev_err(&client->dev, "Failed to register input device: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused qt1050_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct qt1050_priv *ts = i2c_get_clientdata(client);
+
+ disable_irq(client->irq);
+
+ /*
+ * Set the measurement interval to 1s (125 x 8ms) if wakeup is allowed,
+ * else turn it off. The 1s interval seems to be a good compromise between
+ * low power and response time.
+ */
+ return regmap_write(ts->regmap, QT1050_LPMODE,
+ device_may_wakeup(dev) ? 125 : 0);
+}
+
+static int __maybe_unused qt1050_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct qt1050_priv *ts = i2c_get_clientdata(client);
+
+ enable_irq(client->irq);
+
+ /* Set measurement interval back to 16ms (2 x 8ms) */
+ return regmap_write(ts->regmap, QT1050_LPMODE, 2);
+}
+
+static SIMPLE_DEV_PM_OPS(qt1050_pm_ops, qt1050_suspend, qt1050_resume);
+
+static const struct of_device_id __maybe_unused qt1050_of_match[] = {
+ { .compatible = "microchip,qt1050", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qt1050_of_match);
+
+static struct i2c_driver qt1050_driver = {
+ .driver = {
+ .name = "qt1050",
+ .of_match_table = of_match_ptr(qt1050_of_match),
+ .pm = &qt1050_pm_ops,
+ },
+ .probe_new = qt1050_probe,
+};
+
+module_i2c_driver(qt1050_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for AT42QT1050 QTouch sensor");
+MODULE_LICENSE("GPL v2");
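
One detail of the new driver worth spelling out: the KEY_STATUS register does not expose one contiguous bit per key, hence the shift-and-mask in qt1050_irq_threaded(). Judging from the masks, keys 0-1 sit in status bits 1-2 and keys 2-4 in bits 4-6, and the expression repacks them into contiguous bits 0-4. A standalone worked example:

#include <stdio.h>

int main(void)
{
        unsigned int val = 0x52; /* status bits 1, 4 and 6 set */
        unsigned long new_keys = (val & 0x70) >> 2 | (val & 0x6) >> 1;

        /* prints new_keys=0x15, i.e. keys 0, 2 and 4 */
        printf("new_keys=0x%02lx\n", new_keys);
        return 0;
}
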
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 4c67cf30a5d9..5342d8d45f81 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -167,28 +168,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
}
device_init_wakeup(&pdev->dev, pdata->wakeup);
-
- return 0;
-}
-
-static int __maybe_unused imx_snvs_pwrkey_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
-
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(pdata->irq);
-
- return 0;
-}
-
-static int __maybe_unused imx_snvs_pwrkey_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
-
- if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(pdata->irq);
+ error = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
+ if (error)
+ dev_err(&pdev->dev, "irq wake enable failed.\n");
return 0;
}
@@ -199,13 +181,9 @@ static const struct of_device_id imx_snvs_pwrkey_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_snvs_pwrkey_ids);
-static SIMPLE_DEV_PM_OPS(imx_snvs_pwrkey_pm_ops, imx_snvs_pwrkey_suspend,
- imx_snvs_pwrkey_resume);
-
static struct platform_driver imx_snvs_pwrkey_driver = {
.driver = {
.name = "snvs_pwrkey",
- .pm = &imx_snvs_pwrkey_pm_ops,
.of_match_table = imx_snvs_pwrkey_ids,
},
.probe = imx_snvs_pwrkey_probe,
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
index 57272df34cd5..df3eec72a9b2 100644
--- a/drivers/input/keyboard/sun4i-lradc-keys.c
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -46,6 +46,7 @@
#define CONTINUE_TIME_SEL(x) ((x) << 16) /* 4 bits */
#define KEY_MODE_SEL(x) ((x) << 12) /* 2 bits */
#define LEVELA_B_CNT(x) ((x) << 8) /* 4 bits */
+#define HOLD_KEY_EN(x) ((x) << 7)
#define HOLD_EN(x) ((x) << 6)
#define LEVELB_VOL(x) ((x) << 4) /* 2 bits */
#define SAMPLE_RATE(x) ((x) << 2) /* 2 bits */
@@ -63,6 +64,25 @@
#define CHAN0_KEYDOWN_IRQ BIT(1)
#define CHAN0_DATA_IRQ BIT(0)
+/* struct lradc_variant - Describe sun4i-a10-lradc-keys hardware variant
+ * @divisor_numerator: numerator of the LRADC Vref internal divider
+ * @divisor_denominator: denominator of the LRADC Vref internal divider
+ */
+struct lradc_variant {
+ u8 divisor_numerator;
+ u8 divisor_denominator;
+};
+
+static const struct lradc_variant lradc_variant_a10 = {
+ .divisor_numerator = 2,
+ .divisor_denominator = 3
+};
+
+static const struct lradc_variant r_lradc_variant_a83t = {
+ .divisor_numerator = 3,
+ .divisor_denominator = 4
+};
+
struct sun4i_lradc_keymap {
u32 voltage;
u32 keycode;
@@ -74,6 +94,7 @@ struct sun4i_lradc_data {
void __iomem *base;
struct regulator *vref_supply;
struct sun4i_lradc_keymap *chan0_map;
+ const struct lradc_variant *variant;
u32 chan0_map_count;
u32 chan0_keycode;
u32 vref;
@@ -128,9 +149,9 @@ static int sun4i_lradc_open(struct input_dev *dev)
if (error)
return error;
- /* lradc Vref internally is divided by 2/3 */
- lradc->vref = regulator_get_voltage(lradc->vref_supply) * 2 / 3;
-
+ lradc->vref = regulator_get_voltage(lradc->vref_supply) *
+ lradc->variant->divisor_numerator /
+ lradc->variant->divisor_denominator;
/*
* Set sample time to 4 ms / 250 Hz. Wait 2 * 4 ms for key to
* stabilize on press, wait (1 + 1) * 4 ms for key release
@@ -222,6 +243,12 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
if (error)
return error;
+ lradc->variant = of_device_get_match_data(&pdev->dev);
+ if (!lradc->variant) {
+ dev_err(&pdev->dev, "Missing sun4i-a10-lradc-keys variant\n");
+ return -EINVAL;
+ }
+
lradc->vref_supply = devm_regulator_get(dev, "vref");
if (IS_ERR(lradc->vref_supply))
return PTR_ERR(lradc->vref_supply);
@@ -265,7 +292,10 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
}
static const struct of_device_id sun4i_lradc_of_match[] = {
- { .compatible = "allwinner,sun4i-a10-lradc-keys", },
+ { .compatible = "allwinner,sun4i-a10-lradc-keys",
+ .data = &lradc_variant_a10 },
+ { .compatible = "allwinner,sun8i-a83t-r-lradc",
+ .data = &r_lradc_variant_a83t },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_lradc_of_match);
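A worked example of the new per-variant scaling (illustrative only, not part of the patch): with a 3,000,000 uV vref supply, the A10 divider (2/3) gives 2,000,000 uV while the A83T R_LRADC divider (3/4) gives 2,250,000 uV. A hypothetical helper spelling out the integer math that sun4i_lradc_open() now applies to the matched variant data:

static int lradc_scaled_vref(int supply_uV, int numerator, int denominator)
{
	/* e.g. lradc_scaled_vref(3000000, 3, 4) == 2250000 */
	return supply_uV * numerator / denominator;
}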
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index e15ed1bb8558..54d36f98b426 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -190,6 +190,15 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX77650_ONKEY
+ tristate "Maxim MAX77650 ONKEY support"
+ depends on MFD_MAX77650
+ help
+ Support the ONKEY of the MAX77650 PMIC as an input device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called max77650-onkey.
+
config INPUT_MAX77693_HAPTIC
tristate "MAXIM MAX77693/MAX77843 haptic controller support"
depends on (MFD_MAX77693 || MFD_MAX77843) && PWM
@@ -290,6 +299,18 @@ config INPUT_GPIO_DECODER
To compile this driver as a module, choose M here: the module
will be called gpio_decoder.
+config INPUT_GPIO_VIBRA
+ tristate "GPIO vibrator support"
+ depends on GPIOLIB || COMPILE_TEST
+ select INPUT_FF_MEMLESS
+ help
+ Say Y here to get support for GPIO based vibrator devices.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called gpio-vibra.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index b936c5b1d4ac..8fd187f314bd 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
+obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
diff --git a/drivers/input/misc/gpio-vibra.c b/drivers/input/misc/gpio-vibra.c
new file mode 100644
index 000000000000..f79f75595dd7
--- /dev/null
+++ b/drivers/input/misc/gpio-vibra.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO vibrator driver
+ *
+ * Copyright (C) 2019 Luca Weiss <luca@z3ntu.xyz>
+ *
+ * Based on PWM vibrator driver:
+ * Copyright (C) 2017 Collabora Ltd.
+ *
+ * Based on previous work from:
+ * Copyright (C) 2012 Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ *
+ * Based on PWM beeper driver:
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+struct gpio_vibrator {
+ struct input_dev *input;
+ struct gpio_desc *gpio;
+ struct regulator *vcc;
+
+ struct work_struct play_work;
+ bool running;
+ bool vcc_on;
+};
+
+static int gpio_vibrator_start(struct gpio_vibrator *vibrator)
+{
+ struct device *pdev = vibrator->input->dev.parent;
+ int err;
+
+ if (!vibrator->vcc_on) {
+ err = regulator_enable(vibrator->vcc);
+ if (err) {
+ dev_err(pdev, "failed to enable regulator: %d\n", err);
+ return err;
+ }
+ vibrator->vcc_on = true;
+ }
+
+ gpiod_set_value_cansleep(vibrator->gpio, 1);
+
+ return 0;
+}
+
+static void gpio_vibrator_stop(struct gpio_vibrator *vibrator)
+{
+ gpiod_set_value_cansleep(vibrator->gpio, 0);
+
+ if (vibrator->vcc_on) {
+ regulator_disable(vibrator->vcc);
+ vibrator->vcc_on = false;
+ }
+}
+
+static void gpio_vibrator_play_work(struct work_struct *work)
+{
+ struct gpio_vibrator *vibrator =
+ container_of(work, struct gpio_vibrator, play_work);
+
+ if (vibrator->running)
+ gpio_vibrator_start(vibrator);
+ else
+ gpio_vibrator_stop(vibrator);
+}
+
+static int gpio_vibrator_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct gpio_vibrator *vibrator = input_get_drvdata(dev);
+ int level;
+
+ level = effect->u.rumble.strong_magnitude;
+ if (!level)
+ level = effect->u.rumble.weak_magnitude;
+
+ vibrator->running = level;
+ schedule_work(&vibrator->play_work);
+
+ return 0;
+}
+
+static void gpio_vibrator_close(struct input_dev *input)
+{
+ struct gpio_vibrator *vibrator = input_get_drvdata(input);
+
+ cancel_work_sync(&vibrator->play_work);
+ gpio_vibrator_stop(vibrator);
+ vibrator->running = false;
+}
+
+static int gpio_vibrator_probe(struct platform_device *pdev)
+{
+ struct gpio_vibrator *vibrator;
+ int err;
+
+ vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
+ if (!vibrator)
+ return -ENOMEM;
+
+ vibrator->input = devm_input_allocate_device(&pdev->dev);
+ if (!vibrator->input)
+ return -ENOMEM;
+
+ vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ err = PTR_ERR_OR_ZERO(vibrator->vcc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to request regulator: %d\n",
+ err);
+ return err;
+ }
+
+ vibrator->gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ err = PTR_ERR_OR_ZERO(vibrator->gpio);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to request main gpio: %d\n",
+ err);
+ return err;
+ }
+
+ INIT_WORK(&vibrator->play_work, gpio_vibrator_play_work);
+
+ vibrator->input->name = "gpio-vibrator";
+ vibrator->input->id.bustype = BUS_HOST;
+ vibrator->input->close = gpio_vibrator_close;
+
+ input_set_drvdata(vibrator->input, vibrator);
+ input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);
+
+ err = input_ff_create_memless(vibrator->input, NULL,
+ gpio_vibrator_play_effect);
+ if (err) {
+ dev_err(&pdev->dev, "Couldn't create FF dev: %d\n", err);
+ return err;
+ }
+
+ err = input_register_device(vibrator->input);
+ if (err) {
+ dev_err(&pdev->dev, "Couldn't register input dev: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, vibrator);
+
+ return 0;
+}
+
+static int __maybe_unused gpio_vibrator_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_vibrator *vibrator = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&vibrator->play_work);
+ if (vibrator->running)
+ gpio_vibrator_stop(vibrator);
+
+ return 0;
+}
+
+static int __maybe_unused gpio_vibrator_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_vibrator *vibrator = platform_get_drvdata(pdev);
+
+ if (vibrator->running)
+ gpio_vibrator_start(vibrator);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_vibrator_pm_ops,
+ gpio_vibrator_suspend, gpio_vibrator_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_vibra_dt_match_table[] = {
+ { .compatible = "gpio-vibrator" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_vibra_dt_match_table);
+#endif
+
+static struct platform_driver gpio_vibrator_driver = {
+ .probe = gpio_vibrator_probe,
+ .driver = {
+ .name = "gpio-vibrator",
+ .pm = &gpio_vibrator_pm_ops,
+ .of_match_table = of_match_ptr(gpio_vibra_dt_match_table),
+ },
+};
+module_platform_driver(gpio_vibrator_driver);
+
+MODULE_AUTHOR("Luca Weiss <luca@z3ntu.xy>");
+MODULE_DESCRIPTION("GPIO vibrator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-vibrator");
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 1fe149f3def2..4776273fa10b 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -30,6 +30,8 @@ MODULE_ALIAS("platform:ixp4xx-beeper");
static DEFINE_SPINLOCK(beep_lock);
+static int ixp4xx_timer2_irq;
+
static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
{
unsigned long flags;
@@ -90,6 +92,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
+ int irq;
int err;
input_dev = input_allocate_device();
@@ -110,15 +113,22 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = ixp4xx_spkr_event;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto err_free_device;
+ }
+
err = gpio_request(dev->id, "ixp4-beeper");
if (err)
goto err_free_device;
- err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
+ err = request_irq(irq, &ixp4xx_spkr_interrupt,
IRQF_NO_SUSPEND, "ixp4xx-beeper",
(void *) dev->id);
if (err)
goto err_free_gpio;
+ ixp4xx_timer2_irq = irq;
err = input_register_device(input_dev);
if (err)
@@ -129,7 +139,7 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
return 0;
err_free_irq:
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(irq, (void *)dev->id);
err_free_gpio:
gpio_free(dev->id);
err_free_device:
@@ -146,10 +156,10 @@ static int ixp4xx_spkr_remove(struct platform_device *dev)
input_unregister_device(input_dev);
/* turn the speaker off */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(ixp4xx_timer2_irq, (void *)dev->id);
gpio_free(dev->id);
return 0;
@@ -161,7 +171,7 @@ static void ixp4xx_spkr_shutdown(struct platform_device *dev)
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
/* turn off the speaker */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
}
diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c
new file mode 100644
index 000000000000..fbf6caab7217
--- /dev/null
+++ b/drivers/input/misc/max77650-onkey.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// ONKEY driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_ONKEY_MODE_MASK BIT(3)
+#define MAX77650_ONKEY_MODE_PUSH 0x00
+#define MAX77650_ONKEY_MODE_SLIDE BIT(3)
+
+struct max77650_onkey {
+ struct input_dev *input;
+ unsigned int code;
+};
+
+static irqreturn_t max77650_onkey_falling(int irq, void *data)
+{
+ struct max77650_onkey *onkey = data;
+
+ input_report_key(onkey->input, onkey->code, 0);
+ input_sync(onkey->input);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max77650_onkey_rising(int irq, void *data)
+{
+ struct max77650_onkey *onkey = data;
+
+ input_report_key(onkey->input, onkey->code, 1);
+ input_sync(onkey->input);
+
+ return IRQ_HANDLED;
+}
+
+static int max77650_onkey_probe(struct platform_device *pdev)
+{
+ int irq_r, irq_f, error, mode;
+ struct max77650_onkey *onkey;
+ struct device *dev, *parent;
+ struct regmap *map;
+ unsigned int type;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+
+ map = dev_get_regmap(parent, NULL);
+ if (!map)
+ return -ENODEV;
+
+ onkey = devm_kzalloc(dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey)
+ return -ENOMEM;
+
+ error = device_property_read_u32(dev, "linux,code", &onkey->code);
+ if (error)
+ onkey->code = KEY_POWER;
+
+ if (device_property_read_bool(dev, "maxim,onkey-slide")) {
+ mode = MAX77650_ONKEY_MODE_SLIDE;
+ type = EV_SW;
+ } else {
+ mode = MAX77650_ONKEY_MODE_PUSH;
+ type = EV_KEY;
+ }
+
+ error = regmap_update_bits(map, MAX77650_REG_CNFG_GLBL,
+ MAX77650_ONKEY_MODE_MASK, mode);
+ if (error)
+ return error;
+
+ irq_f = platform_get_irq_byname(pdev, "nEN_F");
+ if (irq_f < 0)
+ return irq_f;
+
+ irq_r = platform_get_irq_byname(pdev, "nEN_R");
+ if (irq_r < 0)
+ return irq_r;
+
+ onkey->input = devm_input_allocate_device(dev);
+ if (!onkey->input)
+ return -ENOMEM;
+
+ onkey->input->name = "max77650_onkey";
+ onkey->input->phys = "max77650_onkey/input0";
+ onkey->input->id.bustype = BUS_I2C;
+ input_set_capability(onkey->input, type, onkey->code);
+
+ error = devm_request_any_context_irq(dev, irq_f, max77650_onkey_falling,
+ IRQF_ONESHOT, "onkey-down", onkey);
+ if (error < 0)
+ return error;
+
+ error = devm_request_any_context_irq(dev, irq_r, max77650_onkey_rising,
+ IRQF_ONESHOT, "onkey-up", onkey);
+ if (error < 0)
+ return error;
+
+ return input_register_device(onkey->input);
+}
+
+static struct platform_driver max77650_onkey_driver = {
+ .driver = {
+ .name = "max77650-onkey",
+ },
+ .probe = max77650_onkey_probe,
+};
+module_platform_driver(max77650_onkey_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 ONKEY driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index d3ff1fc09af7..94f7ca5ad077 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -373,6 +373,8 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
if (ps2_handle_response(&psmouse->ps2dev, data))
goto out;
+ pm_wakeup_event(&serio->dev, 0);
+
if (psmouse->state <= PSMOUSE_RESYNCING)
goto out;
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index a6f515bcab22..516fea06ed59 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -456,25 +456,15 @@ static int rmi_f54_vidioc_fmt(struct file *file, void *priv,
static int rmi_f54_vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
+ struct f54_data *f54 = video_drvdata(file);
+
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- switch (fmt->index) {
- case 0:
- fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
- break;
-
- case 1:
- fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD08;
- break;
-
- case 2:
- fmt->pixelformat = V4L2_TCH_FMT_TU16;
- break;
-
- default:
+ if (fmt->index)
return -EINVAL;
- }
+
+ fmt->pixelformat = f54->format.pixelformat;
return 0;
}
@@ -692,6 +682,7 @@ static int rmi_f54_probe(struct rmi_function *fn)
return -ENOMEM;
rmi_f54_create_input_map(f54);
+ rmi_f54_set_input(f54, 0);
/* register video device */
strlcpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index c9c7224d5ae0..bfe436ccb046 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -254,6 +254,7 @@ config SERIO_APBPS2
config SERIO_OLPC_APSP
tristate "OLPC AP-SP input support"
+ depends on ARCH_MMP || COMPILE_TEST
help
Say Y here if you want support for the keyboard and touchpad included
in the OLPC XO-1.75 and XO-4 laptops.
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index a8b9be3e28db..7935e52b5435 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -440,5 +440,7 @@ static void __exit hv_kbd_exit(void)
}
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Keyboard Driver");
+
module_init(hv_kbd_init);
module_exit(hv_kbd_exit);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 95a78ccbd847..6462f1798fbb 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -573,9 +573,6 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
port = &i8042_ports[port_no];
serio = port->exists ? port->serio : NULL;
- if (irq && serio)
- pm_wakeup_event(&serio->dev, 0);
-
filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
port_no, irq,
dfl & SERIO_PARITY ? ", bad parity" : "",
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e6a07e68d1ff..22b8e05aa36c 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -409,6 +409,7 @@ bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data)
ps2dev->nak = PS2_RET_ERR;
break;
}
+ /* Fall through */
/*
* Workaround for mice which don't ACK the Get ID command.
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 7a4884ad198b..a2029c3235af 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1312,4 +1312,14 @@ config TOUCHSCREEN_ROHM_BU21023
To compile this driver as a module, choose M here: the
module will be called bu21023_ts.
+config TOUCHSCREEN_IQS5XX
+ tristate "Azoteq IQS550/572/525 trackpad/touchscreen controller"
+ depends on I2C
+ help
+ Say Y to enable support for the Azoteq IQS550/572/525
+ family of trackpad/touchscreen controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs5xx.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fcc7605fba8d..084a596a0c8b 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -110,3 +110,4 @@ obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
obj-$(CONFIG_TOUCHSCREEN_RASPBERRYPI_FW) += raspberrypi-ts.o
+obj-$(CONFIG_TOUCHSCREEN_IQS5XX) += iqs5xx.o
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 702bfda7ee77..c639ebce914c 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
* Daniel Wagener <daniel.wagener@kernelconcepts.de> (M09 firmware support)
* Lothar Waßmann <LW@KARO-electronics.de> (DT support)
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
@@ -39,7 +27,6 @@
#include <linux/gpio/consumer.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/of_device.h>
#define WORK_REGISTER_THRESHOLD 0x00
#define WORK_REGISTER_REPORT_RATE 0x08
@@ -1073,7 +1060,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return -ENOMEM;
}
- chip_data = of_device_get_match_data(&client->dev);
+ chip_data = device_get_match_data(&client->dev);
if (!chip_data)
chip_data = (const struct edt_i2c_chip_data *)id->driver_data;
if (!chip_data || !chip_data->max_support_points) {
@@ -1254,7 +1241,6 @@ static const struct i2c_device_id edt_ft5x06_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, edt_ft5x06_ts_id);
-#ifdef CONFIG_OF
static const struct of_device_id edt_ft5x06_of_match[] = {
{ .compatible = "edt,edt-ft5206", .data = &edt_ft5x06_data },
{ .compatible = "edt,edt-ft5306", .data = &edt_ft5x06_data },
@@ -1266,12 +1252,11 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
-#endif
static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.name = "edt_ft5x06",
- .of_match_table = of_match_ptr(edt_ft5x06_of_match),
+ .of_match_table = edt_ft5x06_of_match,
.pm = &edt_ft5x06_ts_pm_ops,
},
.id_table = edt_ft5x06_ts_id,
@@ -1283,4 +1268,4 @@ module_i2c_driver(edt_ft5x06_ts_driver);
MODULE_AUTHOR("Simon Budig <simon.budig@kernelconcepts.de>");
MODULE_DESCRIPTION("EDT FT5x06 I2C Touchscreen Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index f57d82220a88..f7c1d168dd89 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -47,6 +48,8 @@ struct goodix_ts_data {
struct touchscreen_properties prop;
unsigned int max_touch_num;
unsigned int int_trigger_type;
+ struct regulator *avdd28;
+ struct regulator *vddio;
struct gpio_desc *gpiod_int;
struct gpio_desc *gpiod_rst;
u16 id;
@@ -216,6 +219,7 @@ static const struct goodix_chip_data *goodix_get_chip_data(u16 id)
{
switch (id) {
case 1151:
+ case 5663:
case 5688:
return &gt1x_chip_data;
@@ -532,6 +536,24 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
+ ts->avdd28 = devm_regulator_get(dev, "AVDD28");
+ if (IS_ERR(ts->avdd28)) {
+ error = PTR_ERR(ts->avdd28);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get AVDD28 regulator: %d\n", error);
+ return error;
+ }
+
+ ts->vddio = devm_regulator_get(dev, "VDDIO");
+ if (IS_ERR(ts->vddio)) {
+ error = PTR_ERR(ts->vddio);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get VDDIO regulator: %d\n", error);
+ return error;
+ }
+
/* Get the interrupt GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
if (IS_ERR(gpiod)) {
@@ -764,6 +786,14 @@ err_release_cfg:
complete_all(&ts->firmware_loading_complete);
}
+static void goodix_disable_regulators(void *arg)
+{
+ struct goodix_ts_data *ts = arg;
+
+ regulator_disable(ts->vddio);
+ regulator_disable(ts->avdd28);
+}
+
static int goodix_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -789,6 +819,29 @@ static int goodix_ts_probe(struct i2c_client *client,
if (error)
return error;
+ /* power up the controller */
+ error = regulator_enable(ts->avdd28);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable AVDD28 regulator: %d\n",
+ error);
+ return error;
+ }
+
+ error = regulator_enable(ts->vddio);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable VDDIO regulator: %d\n",
+ error);
+ regulator_disable(ts->avdd28);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&client->dev,
+ goodix_disable_regulators, ts);
+ if (error)
+ return error;
+
if (ts->gpiod_int && ts->gpiod_rst) {
/* reset the controller */
error = goodix_reset(ts);
@@ -945,6 +998,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
new file mode 100644
index 000000000000..b832fe062645
--- /dev/null
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS550/572/525 Trackpad/Touchscreen Controller
+ *
+ * Copyright (C) 2018
+ * Author: Jeff LaBundy <jeff@labundy.com>
+ *
+ * These devices require firmware exported from a PC-based configuration tool
+ * made available by the vendor. Firmware files may be pushed to the device's
+ * nonvolatile memory by writing the filename to the 'fw_file' sysfs control.
+ *
+ * Link to PC-based configuration tool and data sheet: http://www.azoteq.com/
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS5XX_FW_FILE_LEN 64
+#define IQS5XX_NUM_RETRIES 10
+#define IQS5XX_NUM_POINTS 256
+#define IQS5XX_NUM_CONTACTS 5
+#define IQS5XX_WR_BYTES_MAX 2
+
+#define IQS5XX_PROD_NUM_IQS550 40
+#define IQS5XX_PROD_NUM_IQS572 58
+#define IQS5XX_PROD_NUM_IQS525 52
+#define IQS5XX_PROJ_NUM_A000 0
+#define IQS5XX_PROJ_NUM_B000 15
+#define IQS5XX_MAJOR_VER_MIN 2
+
+#define IQS5XX_RESUME 0x00
+#define IQS5XX_SUSPEND 0x01
+
+#define IQS5XX_SW_INPUT_EVENT 0x10
+#define IQS5XX_SETUP_COMPLETE 0x40
+#define IQS5XX_EVENT_MODE 0x01
+#define IQS5XX_TP_EVENT 0x04
+
+#define IQS5XX_FLIP_X 0x01
+#define IQS5XX_FLIP_Y 0x02
+#define IQS5XX_SWITCH_XY_AXIS 0x04
+
+#define IQS5XX_PROD_NUM 0x0000
+#define IQS5XX_ABS_X 0x0016
+#define IQS5XX_ABS_Y 0x0018
+#define IQS5XX_SYS_CTRL0 0x0431
+#define IQS5XX_SYS_CTRL1 0x0432
+#define IQS5XX_SYS_CFG0 0x058E
+#define IQS5XX_SYS_CFG1 0x058F
+#define IQS5XX_TOTAL_RX 0x063D
+#define IQS5XX_TOTAL_TX 0x063E
+#define IQS5XX_XY_CFG0 0x0669
+#define IQS5XX_X_RES 0x066E
+#define IQS5XX_Y_RES 0x0670
+#define IQS5XX_CHKSM 0x83C0
+#define IQS5XX_APP 0x8400
+#define IQS5XX_CSTM 0xBE00
+#define IQS5XX_PMAP_END 0xBFFF
+#define IQS5XX_END_COMM 0xEEEE
+
+#define IQS5XX_CHKSM_LEN (IQS5XX_APP - IQS5XX_CHKSM)
+#define IQS5XX_APP_LEN (IQS5XX_CSTM - IQS5XX_APP)
+#define IQS5XX_CSTM_LEN (IQS5XX_PMAP_END + 1 - IQS5XX_CSTM)
+#define IQS5XX_PMAP_LEN (IQS5XX_PMAP_END + 1 - IQS5XX_CHKSM)
+
+#define IQS5XX_REC_HDR_LEN 4
+#define IQS5XX_REC_LEN_MAX 255
+#define IQS5XX_REC_TYPE_DATA 0x00
+#define IQS5XX_REC_TYPE_EOF 0x01
+
+#define IQS5XX_BL_ADDR_MASK 0x40
+#define IQS5XX_BL_CMD_VER 0x00
+#define IQS5XX_BL_CMD_READ 0x01
+#define IQS5XX_BL_CMD_EXEC 0x02
+#define IQS5XX_BL_CMD_CRC 0x03
+#define IQS5XX_BL_BLK_LEN_MAX 64
+#define IQS5XX_BL_ID 0x0200
+#define IQS5XX_BL_STATUS_RESET 0x00
+#define IQS5XX_BL_STATUS_AVAIL 0xA5
+#define IQS5XX_BL_STATUS_NONE 0xEE
+#define IQS5XX_BL_CRC_PASS 0x00
+#define IQS5XX_BL_CRC_FAIL 0x01
+#define IQS5XX_BL_ATTEMPTS 3
+
+struct iqs5xx_private {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *reset_gpio;
+ struct mutex lock;
+ u8 bl_status;
+};
+
+struct iqs5xx_dev_id_info {
+ __be16 prod_num;
+ __be16 proj_num;
+ u8 major_ver;
+ u8 minor_ver;
+ u8 bl_status;
+} __packed;
+
+struct iqs5xx_ihex_rec {
+ char start;
+ char len[2];
+ char addr[4];
+ char type[2];
+ char data[2];
+} __packed;
+
+struct iqs5xx_touch_data {
+ __be16 abs_x;
+ __be16 abs_y;
+ __be16 strength;
+ u8 area;
+} __packed;
+
+static int iqs5xx_read_burst(struct i2c_client *client,
+ u16 reg, void *val, u16 len)
+{
+ __be16 reg_buf = cpu_to_be16(reg);
+ int ret, i;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(reg_buf),
+ .buf = (u8 *)&reg_buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = (u8 *)val,
+ },
+ };
+
+ /*
+ * The first addressing attempt outside of a communication window fails
+ * and must be retried, after which the device clock stretches until it
+ * is available.
+ */
+ for (i = 0; i < IQS5XX_NUM_RETRIES; i++) {
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret == ARRAY_SIZE(msg))
+ return 0;
+
+ usleep_range(200, 300);
+ }
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to read from address 0x%04X: %d\n",
+ reg, ret);
+
+ return ret;
+}
+
+static int iqs5xx_read_word(struct i2c_client *client, u16 reg, u16 *val)
+{
+ __be16 val_buf;
+ int error;
+
+ error = iqs5xx_read_burst(client, reg, &val_buf, sizeof(val_buf));
+ if (error)
+ return error;
+
+ *val = be16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs5xx_read_byte(struct i2c_client *client, u16 reg, u8 *val)
+{
+ return iqs5xx_read_burst(client, reg, val, sizeof(*val));
+}
+
+static int iqs5xx_write_burst(struct i2c_client *client,
+ u16 reg, const void *val, u16 len)
+{
+ int ret, i;
+ u16 mlen = sizeof(reg) + len;
+ u8 mbuf[sizeof(reg) + IQS5XX_WR_BYTES_MAX];
+
+ if (len > IQS5XX_WR_BYTES_MAX)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, mbuf);
+ memcpy(mbuf + sizeof(reg), val, len);
+
+ /*
+ * The first addressing attempt outside of a communication window fails
+ * and must be retried, after which the device clock stretches until it
+ * is available.
+ */
+ for (i = 0; i < IQS5XX_NUM_RETRIES; i++) {
+ ret = i2c_master_send(client, mbuf, mlen);
+ if (ret == mlen)
+ return 0;
+
+ usleep_range(200, 300);
+ }
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to write to address 0x%04X: %d\n",
+ reg, ret);
+
+ return ret;
+}
+
+static int iqs5xx_write_word(struct i2c_client *client, u16 reg, u16 val)
+{
+ __be16 val_buf = cpu_to_be16(val);
+
+ return iqs5xx_write_burst(client, reg, &val_buf, sizeof(val_buf));
+}
+
+static int iqs5xx_write_byte(struct i2c_client *client, u16 reg, u8 val)
+{
+ return iqs5xx_write_burst(client, reg, &val, sizeof(val));
+}
+
+static void iqs5xx_reset(struct i2c_client *client)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+
+ gpiod_set_value_cansleep(iqs5xx->reset_gpio, 1);
+ usleep_range(200, 300);
+
+ gpiod_set_value_cansleep(iqs5xx->reset_gpio, 0);
+}
+
+static int iqs5xx_bl_cmd(struct i2c_client *client, u8 bl_cmd, u16 bl_addr)
+{
+ struct i2c_msg msg;
+ int ret;
+ u8 mbuf[sizeof(bl_cmd) + sizeof(bl_addr)];
+
+ msg.addr = client->addr ^ IQS5XX_BL_ADDR_MASK;
+ msg.flags = 0;
+ msg.len = sizeof(bl_cmd);
+ msg.buf = mbuf;
+
+ *mbuf = bl_cmd;
+
+ switch (bl_cmd) {
+ case IQS5XX_BL_CMD_VER:
+ case IQS5XX_BL_CMD_CRC:
+ case IQS5XX_BL_CMD_EXEC:
+ break;
+ case IQS5XX_BL_CMD_READ:
+ msg.len += sizeof(bl_addr);
+ put_unaligned_be16(bl_addr, mbuf + sizeof(bl_cmd));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ switch (bl_cmd) {
+ case IQS5XX_BL_CMD_VER:
+ msg.len = sizeof(u16);
+ break;
+ case IQS5XX_BL_CMD_CRC:
+ msg.len = sizeof(u8);
+ /*
+ * This delay saves the bus controller the trouble of having to
+ * tolerate a relatively long clock-stretching period while the
+ * CRC is calculated.
+ */
+ msleep(50);
+ break;
+ case IQS5XX_BL_CMD_EXEC:
+ usleep_range(10000, 10100);
+ /* fall through */
+ default:
+ return 0;
+ }
+
+ msg.flags = I2C_M_RD;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ if (bl_cmd == IQS5XX_BL_CMD_VER &&
+ get_unaligned_be16(mbuf) != IQS5XX_BL_ID) {
+ dev_err(&client->dev, "Unrecognized bootloader ID: 0x%04X\n",
+ get_unaligned_be16(mbuf));
+ return -EINVAL;
+ }
+
+ if (bl_cmd == IQS5XX_BL_CMD_CRC && *mbuf != IQS5XX_BL_CRC_PASS) {
+ dev_err(&client->dev, "Bootloader CRC failed\n");
+ return -EIO;
+ }
+
+ return 0;
+
+msg_fail:
+ if (ret >= 0)
+ ret = -EIO;
+
+ if (bl_cmd != IQS5XX_BL_CMD_VER)
+ dev_err(&client->dev,
+ "Unsuccessful bootloader command 0x%02X: %d\n",
+ bl_cmd, ret);
+
+ return ret;
+}
+
+static int iqs5xx_bl_open(struct i2c_client *client)
+{
+ int error, i, j;
+
+ /*
+ * The device opens a bootloader polling window for 2 ms following the
+ * release of reset. If the host cannot establish communication during
+ * this time frame, it must cycle reset again.
+ */
+ for (i = 0; i < IQS5XX_BL_ATTEMPTS; i++) {
+ iqs5xx_reset(client);
+
+ for (j = 0; j < IQS5XX_NUM_RETRIES; j++) {
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
+ if (!error || error == -EINVAL)
+ return error;
+ }
+ }
+
+ dev_err(&client->dev, "Failed to open bootloader: %d\n", error);
+
+ return error;
+}
+
+static int iqs5xx_bl_write(struct i2c_client *client,
+ u16 bl_addr, u8 *pmap_data, u16 pmap_len)
+{
+ struct i2c_msg msg;
+ int ret, i;
+ u8 mbuf[sizeof(bl_addr) + IQS5XX_BL_BLK_LEN_MAX];
+
+ if (pmap_len % IQS5XX_BL_BLK_LEN_MAX)
+ return -EINVAL;
+
+ msg.addr = client->addr ^ IQS5XX_BL_ADDR_MASK;
+ msg.flags = 0;
+ msg.len = sizeof(mbuf);
+ msg.buf = mbuf;
+
+ for (i = 0; i < pmap_len; i += IQS5XX_BL_BLK_LEN_MAX) {
+ put_unaligned_be16(bl_addr + i, mbuf);
+ memcpy(mbuf + sizeof(bl_addr), pmap_data + i,
+ sizeof(mbuf) - sizeof(bl_addr));
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ usleep_range(10000, 10100);
+ }
+
+ return 0;
+
+msg_fail:
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to write block at address 0x%04X: %d\n",
+ bl_addr + i, ret);
+
+ return ret;
+}
+
+static int iqs5xx_bl_verify(struct i2c_client *client,
+ u16 bl_addr, u8 *pmap_data, u16 pmap_len)
+{
+ struct i2c_msg msg;
+ int ret, i;
+ u8 bl_data[IQS5XX_BL_BLK_LEN_MAX];
+
+ if (pmap_len % IQS5XX_BL_BLK_LEN_MAX)
+ return -EINVAL;
+
+ msg.addr = client->addr ^ IQS5XX_BL_ADDR_MASK;
+ msg.flags = I2C_M_RD;
+ msg.len = sizeof(bl_data);
+ msg.buf = bl_data;
+
+ for (i = 0; i < pmap_len; i += IQS5XX_BL_BLK_LEN_MAX) {
+ ret = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_READ, bl_addr + i);
+ if (ret)
+ return ret;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1)
+ goto msg_fail;
+
+ if (memcmp(bl_data, pmap_data + i, sizeof(bl_data))) {
+ dev_err(&client->dev,
+ "Failed to verify block at address 0x%04X\n",
+ bl_addr + i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+
+msg_fail:
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "Failed to read block at address 0x%04X: %d\n",
+ bl_addr + i, ret);
+
+ return ret;
+}
+
+static int iqs5xx_set_state(struct i2c_client *client, u8 state)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ int error1, error2;
+
+ if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ return 0;
+
+ mutex_lock(&iqs5xx->lock);
+
+ /*
+ * Addressing the device outside of a communication window prompts it
+ * to assert the RDY output, so disable the interrupt line to prevent
+ * the handler from servicing a false interrupt.
+ */
+ disable_irq(client->irq);
+
+ error1 = iqs5xx_write_byte(client, IQS5XX_SYS_CTRL1, state);
+ error2 = iqs5xx_write_byte(client, IQS5XX_END_COMM, 0);
+
+ usleep_range(50, 100);
+ enable_irq(client->irq);
+
+ mutex_unlock(&iqs5xx->lock);
+
+ if (error1)
+ return error1;
+
+ return error2;
+}
+
+static int iqs5xx_open(struct input_dev *input)
+{
+ struct iqs5xx_private *iqs5xx = input_get_drvdata(input);
+
+ return iqs5xx_set_state(iqs5xx->client, IQS5XX_RESUME);
+}
+
+static void iqs5xx_close(struct input_dev *input)
+{
+ struct iqs5xx_private *iqs5xx = input_get_drvdata(input);
+
+ iqs5xx_set_state(iqs5xx->client, IQS5XX_SUSPEND);
+}
+
+static int iqs5xx_axis_init(struct i2c_client *client)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ struct touchscreen_properties prop;
+ struct input_dev *input;
+ int error;
+ u16 max_x, max_x_hw;
+ u16 max_y, max_y_hw;
+ u8 val;
+
+ if (!iqs5xx->input) {
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->open = iqs5xx_open;
+ input->close = iqs5xx_close;
+
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_capability(input, EV_ABS, ABS_MT_PRESSURE);
+
+ error = input_mt_init_slots(input,
+ IQS5XX_NUM_CONTACTS, INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to initialize slots: %d\n", error);
+ return error;
+ }
+
+ input_set_drvdata(input, iqs5xx);
+ iqs5xx->input = input;
+ }
+
+ touchscreen_parse_properties(iqs5xx->input, true, &prop);
+
+ error = iqs5xx_read_byte(client, IQS5XX_TOTAL_RX, &val);
+ if (error)
+ return error;
+ max_x_hw = (val - 1) * IQS5XX_NUM_POINTS;
+
+ error = iqs5xx_read_byte(client, IQS5XX_TOTAL_TX, &val);
+ if (error)
+ return error;
+ max_y_hw = (val - 1) * IQS5XX_NUM_POINTS;
+
+ error = iqs5xx_read_byte(client, IQS5XX_XY_CFG0, &val);
+ if (error)
+ return error;
+
+ if (val & IQS5XX_SWITCH_XY_AXIS)
+ swap(max_x_hw, max_y_hw);
+
+ if (prop.swap_x_y)
+ val ^= IQS5XX_SWITCH_XY_AXIS;
+
+ if (prop.invert_x)
+ val ^= prop.swap_x_y ? IQS5XX_FLIP_Y : IQS5XX_FLIP_X;
+
+ if (prop.invert_y)
+ val ^= prop.swap_x_y ? IQS5XX_FLIP_X : IQS5XX_FLIP_Y;
+
+ error = iqs5xx_write_byte(client, IQS5XX_XY_CFG0, val);
+ if (error)
+ return error;
+
+ if (prop.max_x > max_x_hw) {
+ dev_err(&client->dev, "Invalid maximum x-coordinate: %u > %u\n",
+ prop.max_x, max_x_hw);
+ return -EINVAL;
+ } else if (prop.max_x == 0) {
+ error = iqs5xx_read_word(client, IQS5XX_X_RES, &max_x);
+ if (error)
+ return error;
+
+ input_abs_set_max(iqs5xx->input,
+ prop.swap_x_y ? ABS_MT_POSITION_Y :
+ ABS_MT_POSITION_X,
+ max_x);
+ } else {
+ max_x = (u16)prop.max_x;
+ }
+
+ if (prop.max_y > max_y_hw) {
+ dev_err(&client->dev, "Invalid maximum y-coordinate: %u > %u\n",
+ prop.max_y, max_y_hw);
+ return -EINVAL;
+ } else if (prop.max_y == 0) {
+ error = iqs5xx_read_word(client, IQS5XX_Y_RES, &max_y);
+ if (error)
+ return error;
+
+ input_abs_set_max(iqs5xx->input,
+ prop.swap_x_y ? ABS_MT_POSITION_X :
+ ABS_MT_POSITION_Y,
+ max_y);
+ } else {
+ max_y = (u16)prop.max_y;
+ }
+
+ /*
+ * Write horizontal and vertical resolution to the device in case its
+ * original defaults were overridden or swapped as per the properties
+ * specified in the device tree.
+ */
+ error = iqs5xx_write_word(client,
+ prop.swap_x_y ? IQS5XX_Y_RES : IQS5XX_X_RES,
+ max_x);
+ if (error)
+ return error;
+
+ return iqs5xx_write_word(client,
+ prop.swap_x_y ? IQS5XX_X_RES : IQS5XX_Y_RES,
+ max_y);
+}
+
+static int iqs5xx_dev_init(struct i2c_client *client)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ struct iqs5xx_dev_id_info *dev_id_info;
+ int error;
+ u8 val;
+ u8 buf[sizeof(*dev_id_info) + 1];
+
+ error = iqs5xx_read_burst(client, IQS5XX_PROD_NUM,
+ &buf[1], sizeof(*dev_id_info));
+ if (error)
+ return iqs5xx_bl_open(client);
+
+ /*
+ * A000 and B000 devices use 8-bit and 16-bit addressing, respectively.
+ * Querying an A000 device's version information with 16-bit addressing
+ * gives the appearance that the data is shifted by one byte; a nonzero
+ * leading array element suggests this could be the case (in which case
+ * the missing zero is prepended).
+ */
+ buf[0] = 0;
+ dev_id_info = (struct iqs5xx_dev_id_info *)&buf[(buf[1] > 0) ? 0 : 1];
+
+ switch (be16_to_cpu(dev_id_info->prod_num)) {
+ case IQS5XX_PROD_NUM_IQS550:
+ case IQS5XX_PROD_NUM_IQS572:
+ case IQS5XX_PROD_NUM_IQS525:
+ break;
+ default:
+ dev_err(&client->dev, "Unrecognized product number: %u\n",
+ be16_to_cpu(dev_id_info->prod_num));
+ return -EINVAL;
+ }
+
+ switch (be16_to_cpu(dev_id_info->proj_num)) {
+ case IQS5XX_PROJ_NUM_A000:
+ dev_err(&client->dev, "Unsupported project number: %u\n",
+ be16_to_cpu(dev_id_info->proj_num));
+ return iqs5xx_bl_open(client);
+ case IQS5XX_PROJ_NUM_B000:
+ break;
+ default:
+ dev_err(&client->dev, "Unrecognized project number: %u\n",
+ be16_to_cpu(dev_id_info->proj_num));
+ return -EINVAL;
+ }
+
+ if (dev_id_info->major_ver < IQS5XX_MAJOR_VER_MIN) {
+ dev_err(&client->dev, "Unsupported major version: %u\n",
+ dev_id_info->major_ver);
+ return iqs5xx_bl_open(client);
+ }
+
+ switch (dev_id_info->bl_status) {
+ case IQS5XX_BL_STATUS_AVAIL:
+ case IQS5XX_BL_STATUS_NONE:
+ break;
+ default:
+ dev_err(&client->dev,
+ "Unrecognized bootloader status: 0x%02X\n",
+ dev_id_info->bl_status);
+ return -EINVAL;
+ }
+
+ error = iqs5xx_axis_init(client);
+ if (error)
+ return error;
+
+ error = iqs5xx_read_byte(client, IQS5XX_SYS_CFG0, &val);
+ if (error)
+ return error;
+
+ val |= IQS5XX_SETUP_COMPLETE;
+ val &= ~IQS5XX_SW_INPUT_EVENT;
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG0, val);
+ if (error)
+ return error;
+
+ val = IQS5XX_TP_EVENT | IQS5XX_EVENT_MODE;
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG1, val);
+ if (error)
+ return error;
+
+ error = iqs5xx_write_byte(client, IQS5XX_END_COMM, 0);
+ if (error)
+ return error;
+
+ iqs5xx->bl_status = dev_id_info->bl_status;
+
+ /*
+ * Closing the first communication window that opens after reset is
+ * released appears to kick off an initialization period during which
+ * further communication is met with clock stretching. The return from
+ * this function is delayed so that subsequent communication attempts
+ * avoid this period.
+ */
+ msleep(100);
+
+ return 0;
+}
+
+static irqreturn_t iqs5xx_irq(int irq, void *data)
+{
+ struct iqs5xx_private *iqs5xx = data;
+ struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
+ struct i2c_client *client = iqs5xx->client;
+ struct input_dev *input = iqs5xx->input;
+ int error, i;
+
+ /*
+ * This check is purely a precaution, as the device does not assert the
+ * RDY output during bootloader mode. If the device operates outside of
+ * bootloader mode, the input device is guaranteed to be allocated.
+ */
+ if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ return IRQ_NONE;
+
+ error = iqs5xx_read_burst(client, IQS5XX_ABS_X,
+ touch_data, sizeof(touch_data));
+ if (error)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(touch_data); i++) {
+ u16 pressure = be16_to_cpu(touch_data[i].strength);
+
+ input_mt_slot(input, i);
+ if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
+ pressure != 0)) {
+ input_report_abs(input, ABS_MT_POSITION_X,
+ be16_to_cpu(touch_data[i].abs_x));
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ be16_to_cpu(touch_data[i].abs_y));
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+ }
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+
+ error = iqs5xx_write_byte(client, IQS5XX_END_COMM, 0);
+ if (error)
+ return IRQ_NONE;
+
+ /*
+ * Once the communication window is closed, a small delay is added to
+ * ensure the device's RDY output has been deasserted by the time the
+ * interrupt handler returns.
+ */
+ usleep_range(50, 100);
+
+ return IRQ_HANDLED;
+}
+
+static int iqs5xx_fw_file_parse(struct i2c_client *client,
+ const char *fw_file, u8 *pmap)
+{
+ const struct firmware *fw;
+ struct iqs5xx_ihex_rec *rec;
+ size_t pos = 0;
+ int error, i;
+ u16 rec_num = 1;
+ u16 rec_addr;
+ u8 rec_len, rec_type, rec_chksm, chksm;
+ u8 rec_hdr[IQS5XX_REC_HDR_LEN];
+ u8 rec_data[IQS5XX_REC_LEN_MAX];
+
+ /*
+ * Firmware exported from the vendor's configuration tool deviates from
+ * standard ihex as follows: (1) the checksum for records corresponding
+ * to user-exported settings is not recalculated, and (2) an address of
+ * 0xFFFF is used for the EOF record.
+ *
+ * Because the ihex2fw tool tolerates neither (1) nor (2), the slightly
+ * nonstandard ihex firmware is parsed directly by the driver.
+ */
+ error = request_firmware(&fw, fw_file, &client->dev);
+ if (error) {
+ dev_err(&client->dev, "Failed to request firmware %s: %d\n",
+ fw_file, error);
+ return error;
+ }
+
+ do {
+ if (pos + sizeof(*rec) > fw->size) {
+ dev_err(&client->dev, "Insufficient firmware size\n");
+ error = -EINVAL;
+ break;
+ }
+ rec = (struct iqs5xx_ihex_rec *)(fw->data + pos);
+ pos += sizeof(*rec);
+
+ if (rec->start != ':') {
+ dev_err(&client->dev, "Invalid start at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ break;
+ }
+
+ error = hex2bin(rec_hdr, rec->len, sizeof(rec_hdr));
+ if (error) {
+ dev_err(&client->dev, "Invalid header at record %u\n",
+ rec_num);
+ break;
+ }
+
+ rec_len = *rec_hdr;
+ rec_addr = get_unaligned_be16(rec_hdr + sizeof(rec_len));
+ rec_type = *(rec_hdr + sizeof(rec_len) + sizeof(rec_addr));
+
+ if (pos + rec_len * 2 > fw->size) {
+ dev_err(&client->dev, "Insufficient firmware size\n");
+ error = -EINVAL;
+ break;
+ }
+ pos += (rec_len * 2);
+
+ error = hex2bin(rec_data, rec->data, rec_len);
+ if (error) {
+ dev_err(&client->dev, "Invalid data at record %u\n",
+ rec_num);
+ break;
+ }
+
+ error = hex2bin(&rec_chksm,
+ rec->data + rec_len * 2, sizeof(rec_chksm));
+ if (error) {
+ dev_err(&client->dev, "Invalid checksum at record %u\n",
+ rec_num);
+ break;
+ }
+
+ chksm = 0;
+ for (i = 0; i < sizeof(rec_hdr); i++)
+ chksm += rec_hdr[i];
+ for (i = 0; i < rec_len; i++)
+ chksm += rec_data[i];
+ chksm = ~chksm + 1;
+
+ if (chksm != rec_chksm && rec_addr < IQS5XX_CSTM) {
+ dev_err(&client->dev,
+ "Incorrect checksum at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ break;
+ }
+
+ switch (rec_type) {
+ case IQS5XX_REC_TYPE_DATA:
+ if (rec_addr < IQS5XX_CHKSM ||
+ rec_addr > IQS5XX_PMAP_END) {
+ dev_err(&client->dev,
+ "Invalid address at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ } else {
+ memcpy(pmap + rec_addr - IQS5XX_CHKSM,
+ rec_data, rec_len);
+ }
+ break;
+ case IQS5XX_REC_TYPE_EOF:
+ break;
+ default:
+ dev_err(&client->dev, "Invalid type at record %u\n",
+ rec_num);
+ error = -EINVAL;
+ }
+
+ if (error)
+ break;
+
+ rec_num++;
+ while (pos < fw->size) {
+ if (*(fw->data + pos) == ':')
+ break;
+ pos++;
+ }
+ } while (rec_type != IQS5XX_REC_TYPE_EOF);
+
+ release_firmware(fw);
+
+ return error;
+}
+
+static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
+{
+ struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
+ int error;
+ u8 *pmap;
+
+ if (iqs5xx->bl_status == IQS5XX_BL_STATUS_NONE)
+ return -EPERM;
+
+ pmap = kzalloc(IQS5XX_PMAP_LEN, GFP_KERNEL);
+ if (!pmap)
+ return -ENOMEM;
+
+ error = iqs5xx_fw_file_parse(client, fw_file, pmap);
+ if (error)
+ goto err_kfree;
+
+ mutex_lock(&iqs5xx->lock);
+
+ /*
+ * Disable the interrupt line in case the first attempt(s) to enter the
+ * bootloader don't happen quickly enough, in which case the device may
+ * assert the RDY output until the next attempt.
+ */
+ disable_irq(client->irq);
+
+ iqs5xx->bl_status = IQS5XX_BL_STATUS_RESET;
+
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
+ if (error) {
+ error = iqs5xx_bl_open(client);
+ if (error)
+ goto err_reset;
+ }
+
+ error = iqs5xx_bl_write(client, IQS5XX_CHKSM, pmap, IQS5XX_PMAP_LEN);
+ if (error)
+ goto err_reset;
+
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_CRC, 0);
+ if (error)
+ goto err_reset;
+
+ error = iqs5xx_bl_verify(client, IQS5XX_CSTM,
+ pmap + IQS5XX_CHKSM_LEN + IQS5XX_APP_LEN,
+ IQS5XX_CSTM_LEN);
+ if (error)
+ goto err_reset;
+
+ error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_EXEC, 0);
+
+err_reset:
+ if (error) {
+ iqs5xx_reset(client);
+ usleep_range(10000, 10100);
+ }
+
+ error = iqs5xx_dev_init(client);
+ if (!error && iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ error = -EINVAL;
+
+ enable_irq(client->irq);
+
+ mutex_unlock(&iqs5xx->lock);
+
+err_kfree:
+ kfree(pmap);
+
+ return error;
+}
+
+static ssize_t fw_file_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs5xx->client;
+ size_t len = count;
+ bool input_reg = !iqs5xx->input;
+ char fw_file[IQS5XX_FW_FILE_LEN + 1];
+ int error;
+
+ if (!len)
+ return -EINVAL;
+
+ if (buf[len - 1] == '\n')
+ len--;
+
+ if (len > IQS5XX_FW_FILE_LEN)
+ return -ENAMETOOLONG;
+
+ memcpy(fw_file, buf, len);
+ fw_file[len] = '\0';
+
+ error = iqs5xx_fw_file_write(client, fw_file);
+ if (error)
+ return error;
+
+ /*
+ * If the input device was not allocated already, it is guaranteed to
+ * be allocated by this point and can finally be registered.
+ */
+ if (input_reg) {
+ error = input_register_device(iqs5xx->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register device: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(fw_file);
+
+static struct attribute *iqs5xx_attrs[] = {
+ &dev_attr_fw_file.attr,
+ NULL,
+};
+
+static const struct attribute_group iqs5xx_attr_group = {
+ .attrs = iqs5xx_attrs,
+};
+
+static int __maybe_unused iqs5xx_suspend(struct device *dev)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+ struct input_dev *input = iqs5xx->input;
+ int error = 0;
+
+ if (!input)
+ return error;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users)
+ error = iqs5xx_set_state(iqs5xx->client, IQS5XX_SUSPEND);
+
+ mutex_unlock(&input->mutex);
+
+ return error;
+}
+
+static int __maybe_unused iqs5xx_resume(struct device *dev)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+ struct input_dev *input = iqs5xx->input;
+ int error = 0;
+
+ if (!input)
+ return error;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users)
+ error = iqs5xx_set_state(iqs5xx->client, IQS5XX_RESUME);
+
+ mutex_unlock(&input->mutex);
+
+ return error;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs5xx_pm, iqs5xx_suspend, iqs5xx_resume);
+
+static int iqs5xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iqs5xx_private *iqs5xx;
+ int error;
+
+ iqs5xx = devm_kzalloc(&client->dev, sizeof(*iqs5xx), GFP_KERNEL);
+ if (!iqs5xx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&client->dev, iqs5xx);
+
+ i2c_set_clientdata(client, iqs5xx);
+ iqs5xx->client = client;
+
+ iqs5xx->reset_gpio = devm_gpiod_get(&client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(iqs5xx->reset_gpio)) {
+ error = PTR_ERR(iqs5xx->reset_gpio);
+ dev_err(&client->dev, "Failed to request GPIO: %d\n", error);
+ return error;
+ }
+
+ mutex_init(&iqs5xx->lock);
+
+ iqs5xx_reset(client);
+ usleep_range(10000, 10100);
+
+ error = iqs5xx_dev_init(client);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs5xx_irq, IRQF_ONESHOT,
+ client->name, iqs5xx);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &iqs5xx_attr_group);
+ if (error) {
+ dev_err(&client->dev, "Failed to add attributes: %d\n", error);
+ return error;
+ }
+
+ if (iqs5xx->input) {
+ error = input_register_device(iqs5xx->input);
+ if (error)
+ dev_err(&client->dev,
+ "Failed to register device: %d\n",
+ error);
+ }
+
+ return error;
+}
+
+static const struct i2c_device_id iqs5xx_id[] = {
+ { "iqs550", 0 },
+ { "iqs572", 1 },
+ { "iqs525", 2 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, iqs5xx_id);
+
+static const struct of_device_id iqs5xx_of_match[] = {
+ { .compatible = "azoteq,iqs550" },
+ { .compatible = "azoteq,iqs572" },
+ { .compatible = "azoteq,iqs525" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs5xx_of_match);
+
+static struct i2c_driver iqs5xx_i2c_driver = {
+ .driver = {
+ .name = "iqs5xx",
+ .of_match_table = iqs5xx_of_match,
+ .pm = &iqs5xx_pm,
+ },
+ .id_table = iqs5xx_id,
+ .probe = iqs5xx_probe,
+};
+module_i2c_driver(iqs5xx_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS550/572/525 Trackpad/Touchscreen Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6f07f3b21816..15b831113ded 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -359,6 +359,31 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Default to disabling bypass on ARM SMMU v1 and v2"
+ depends on ARM_SMMU
+ default y
+ help
+ Say Y here to disable bypass streams by default, so that
+ incoming transactions from devices that are not attached to
+ an iommu domain are aborted back to the device rather than
+ allowed to pass through the SMMU.
+
+ Kernels that predate this Kconfig option effectively
+ defaulted to _allowing_ bypass (the equivalent of saying N
+ here). The default is nevertheless Y, because the old
+ behavior is insecure.
+
+ There are few reasons to allow unmatched stream bypass, and
+ even fewer good ones. If saying Y here breaks your board,
+ fix the board rather than changing this option. This Kconfig
+ option is expected to be removed in the future, at which
+ point the bypass disable will simply be hard-coded.
+
+ NOTE: the kernel command line parameter
+ 'arm-smmu.disable_bypass' will continue to override this
+ config.
+
config ARM_SMMU_V3
bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index f7cdd2ab7f11..09c9e45f7fa2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -165,7 +165,7 @@ static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
+ return pci_dev_id(pdev);
}
static inline int get_acpihid_device_id(struct device *dev,
@@ -1723,31 +1723,6 @@ static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
*
****************************************************************************/
-/*
- * This function adds a protection domain to the global protection domain list
- */
-static void add_domain_to_list(struct protection_domain *domain)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&amd_iommu_pd_lock, flags);
- list_add(&domain->list, &amd_iommu_pd_list);
- spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-/*
- * This function removes a protection domain to the global
- * protection domain list
- */
-static void del_domain_from_list(struct protection_domain *domain)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&amd_iommu_pd_lock, flags);
- list_del(&domain->list);
- spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
static u16 domain_id_alloc(void)
{
int id;
@@ -1838,8 +1813,6 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
if (!dom)
return;
- del_domain_from_list(&dom->domain);
-
put_iova_domain(&dom->iovad);
free_pagetable(&dom->domain);
@@ -1880,8 +1853,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
/* Initialize reserved ranges */
copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
- add_domain_to_list(&dma_dom->domain);
-
return dma_dom;
free_dma_dom:
@@ -2122,23 +2093,6 @@ out_err:
return ret;
}
-/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 15)
-
-static bool pci_pri_tlp_required(struct pci_dev *pdev)
-{
- u16 status;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return false;
-
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- return (status & PCI_PRI_TLP_OFF) ? true : false;
-}
-
/*
* If a device is not yet associated with a domain, this function makes the
* device visible in the domain
@@ -2167,7 +2121,7 @@ static int attach_device(struct device *dev,
dev_data->ats.enabled = true;
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
- dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
}
} else if (amd_iommu_iotlb_sup &&
pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
@@ -2897,8 +2851,6 @@ static void protection_domain_free(struct protection_domain *domain)
if (!domain)
return;
- del_domain_from_list(domain);
-
if (domain->id)
domain_id_free(domain->id);
@@ -2928,8 +2880,6 @@ static struct protection_domain *protection_domain_alloc(void)
if (protection_domain_init(domain))
goto out_err;
- add_domain_to_list(domain);
-
return domain;
out_err:
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index ff40ba758cf3..f977df90d2a4 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -189,12 +189,6 @@ static bool amd_iommu_pc_present __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * List of protection domains - used during resume
- */
-LIST_HEAD(amd_iommu_pd_list);
-spinlock_t amd_iommu_pd_lock;
-
-/*
* Pointer to the device table which is shared by all AMD IOMMUs
* it is indexed by the PCI device id or the HT unit id and contains
* information about the domain the device belongs to as well as the
@@ -2526,8 +2520,6 @@ static int __init early_amd_iommu_init(void)
*/
__set_bit(0, amd_iommu_pd_alloc_bitmap);
- spin_lock_init(&amd_iommu_pd_lock);
-
/*
* now the data structures are allocated and basically initialized
* start the real acpi table scan
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 87965e4d9647..85c488b8daea 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -675,12 +675,6 @@ extern struct list_head amd_iommu_list;
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
/*
- * Declarations for the global list of all protection domains
- */
-extern spinlock_t amd_iommu_pd_lock;
-extern struct list_head amd_iommu_pd_list;
-
-/*
* Structure defining one entry in the device table
*/
struct dev_table_entry {
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index a1226e4ab5f8..e9132a926761 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -147,6 +147,8 @@ enum arm_smmu_s2cr_privcfg {
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff
+#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
+
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index d3880010c6cf..4d5a694f02c2 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -29,6 +29,7 @@
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/pci-ats.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
@@ -86,6 +87,7 @@
#define IDR5_VAX_52_BIT 1
#define ARM_SMMU_CR0 0x20
+#define CR0_ATSCHK (1 << 4)
#define CR0_CMDQEN (1 << 3)
#define CR0_EVTQEN (1 << 2)
#define CR0_PRIQEN (1 << 1)
@@ -294,6 +296,7 @@
#define CMDQ_ERR_CERROR_NONE_IDX 0
#define CMDQ_ERR_CERROR_ILL_IDX 1
#define CMDQ_ERR_CERROR_ABT_IDX 2
+#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
#define CMDQ_0_OP GENMASK_ULL(7, 0)
#define CMDQ_0_SSV (1UL << 11)
@@ -312,6 +315,12 @@
#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
+#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_ATC_0_GLOBAL (1UL << 9)
+#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
+#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)
+
#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
@@ -433,6 +442,16 @@ struct arm_smmu_cmdq_ent {
u64 addr;
} tlbi;
+ #define CMDQ_OP_ATC_INV 0x40
+ #define ATC_INV_SIZE_ALL 52
+ struct {
+ u32 sid;
+ u32 ssid;
+ u64 addr;
+ u8 size;
+ bool global;
+ } atc;
+
#define CMDQ_OP_PRI_RESP 0x41
struct {
u32 sid;
@@ -505,19 +524,6 @@ struct arm_smmu_s2_cfg {
u64 vtcr;
};
-struct arm_smmu_strtab_ent {
- /*
- * An STE is "assigned" if the master emitting the corresponding SID
- * is attached to a domain. The behaviour of an unassigned STE is
- * determined by the disable_bypass parameter, whereas an assigned
- * STE behaves according to s1_cfg/s2_cfg, which themselves are
- * configured according to the domain type.
- */
- bool assigned;
- struct arm_smmu_s1_cfg *s1_cfg;
- struct arm_smmu_s2_cfg *s2_cfg;
-};
-
struct arm_smmu_strtab_cfg {
__le64 *strtab;
dma_addr_t strtab_dma;
@@ -591,9 +597,14 @@ struct arm_smmu_device {
};
/* SMMU private data for each master */
-struct arm_smmu_master_data {
+struct arm_smmu_master {
struct arm_smmu_device *smmu;
- struct arm_smmu_strtab_ent ste;
+ struct device *dev;
+ struct arm_smmu_domain *domain;
+ struct list_head domain_head;
+ u32 *sids;
+ unsigned int num_sids;
+ bool ats_enabled :1;
};
/* SMMU private data for an IOMMU domain */
@@ -618,6 +629,9 @@ struct arm_smmu_domain {
};
struct iommu_domain domain;
+
+ struct list_head devices;
+ spinlock_t devices_lock;
};
struct arm_smmu_option_prop {
@@ -820,6 +834,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
+ case CMDQ_OP_ATC_INV:
+ cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
+ cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
+ break;
case CMDQ_OP_PRI_RESP:
cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
@@ -864,6 +886,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
[CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
[CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
+ [CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout",
};
int i;
@@ -883,6 +906,14 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
dev_err(smmu->dev, "retrying command fetch\n");
case CMDQ_ERR_CERROR_NONE_IDX:
return;
+ case CMDQ_ERR_CERROR_ATC_INV_IDX:
+ /*
+ * ATC Invalidation Completion timeout. CONS is still pointing
+ * at the CMD_SYNC. Attempt to complete other pending commands
+ * by repeating the CMD_SYNC, though we might well end up back
+ * here since the ATC invalidation may still be pending.
+ */
+ return;
case CMDQ_ERR_CERROR_ILL_IDX:
/* Fallthrough */
default:
@@ -999,7 +1030,7 @@ static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
return ret;
}
-static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
{
int ret;
bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
@@ -1009,6 +1040,7 @@ static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
: __arm_smmu_cmdq_issue_sync(smmu);
if (ret)
dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+ return ret;
}
/* Context descriptor manipulation functions */
@@ -1025,7 +1057,6 @@ static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
val |= ARM_SMMU_TCR2CD(tcr, EPD0);
val |= ARM_SMMU_TCR2CD(tcr, EPD1);
val |= ARM_SMMU_TCR2CD(tcr, IPS);
- val |= ARM_SMMU_TCR2CD(tcr, TBI0);
return val;
}
@@ -1085,8 +1116,8 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_sync(smmu);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
- __le64 *dst, struct arm_smmu_strtab_ent *ste)
+static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
+ __le64 *dst)
{
/*
* This is hideously complicated, but we only really care about
@@ -1106,6 +1137,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
*/
u64 val = le64_to_cpu(dst[0]);
bool ste_live = false;
+ struct arm_smmu_device *smmu = NULL;
+ struct arm_smmu_s1_cfg *s1_cfg = NULL;
+ struct arm_smmu_s2_cfg *s2_cfg = NULL;
+ struct arm_smmu_domain *smmu_domain = NULL;
struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
@@ -1113,6 +1148,25 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
},
};
+ if (master) {
+ smmu_domain = master->domain;
+ smmu = master->smmu;
+ }
+
+ if (smmu_domain) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ s1_cfg = &smmu_domain->s1_cfg;
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ case ARM_SMMU_DOMAIN_NESTED:
+ s2_cfg = &smmu_domain->s2_cfg;
+ break;
+ default:
+ break;
+ }
+ }
+
if (val & STRTAB_STE_0_V) {
switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
case STRTAB_STE_0_CFG_BYPASS:
@@ -1133,8 +1187,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val = STRTAB_STE_0_V;
/* Bypass/fault */
- if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
- if (!ste->assigned && disable_bypass)
+ if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+ if (!smmu_domain && disable_bypass)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
@@ -1152,41 +1206,42 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
return;
}
- if (ste->s1_cfg) {
+ if (s1_cfg) {
BUG_ON(ste_live);
dst[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
-#ifdef CONFIG_PCI_ATS
- FIELD_PREP(STRTAB_STE_1_EATS, STRTAB_STE_1_EATS_TRANS) |
-#endif
FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
- val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
}
- if (ste->s2_cfg) {
+ if (s2_cfg) {
BUG_ON(ste_live);
dst[2] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_2_S2VMID, ste->s2_cfg->vmid) |
- FIELD_PREP(STRTAB_STE_2_VTCR, ste->s2_cfg->vtcr) |
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
STRTAB_STE_2_S2ENDI |
#endif
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
- dst[3] = cpu_to_le64(ste->s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+ dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
+ if (master->ats_enabled)
+ dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+ STRTAB_STE_1_EATS_TRANS));
+
arm_smmu_sync_ste_for_sid(smmu, sid);
dst[0] = cpu_to_le64(val);
arm_smmu_sync_ste_for_sid(smmu, sid);
@@ -1199,10 +1254,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
unsigned int i;
- struct arm_smmu_strtab_ent ste = { .assigned = false };
for (i = 0; i < nent; ++i) {
- arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
+ arm_smmu_write_strtab_ent(NULL, -1, strtab);
strtab += STRTAB_STE_DWORDS;
}
}
@@ -1390,6 +1444,96 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
return IRQ_WAKE_THREAD;
}
+static void
+arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ size_t log2_span;
+ size_t span_mask;
+ /* ATC invalidates are always on 4096-bytes pages */
+ size_t inval_grain_shift = 12;
+ unsigned long page_start, page_end;
+
+ *cmd = (struct arm_smmu_cmdq_ent) {
+ .opcode = CMDQ_OP_ATC_INV,
+ .substream_valid = !!ssid,
+ .atc.ssid = ssid,
+ };
+
+ if (!size) {
+ cmd->atc.size = ATC_INV_SIZE_ALL;
+ return;
+ }
+
+ page_start = iova >> inval_grain_shift;
+ page_end = (iova + size - 1) >> inval_grain_shift;
+
+ /*
+ * In an ATS Invalidate Request, the address must be aligned on the
+ * range size, which must be a power of two number of page sizes. We
+ * thus have to choose between grossly over-invalidating the region, or
+ * splitting the invalidation into multiple commands. For simplicity
+ * we'll go with the first solution, but should refine it in the future
+ * if multiple commands are shown to be more efficient.
+ *
+ * Find the smallest power of two that covers the range. The most
+ * significant differing bit between the start and end addresses,
+ * fls(start ^ end), indicates the required span. For example:
+ *
+ * We want to invalidate pages [8; 11]. This is already the ideal range:
+ * x = 0b1000 ^ 0b1011 = 0b11
+ * span = 1 << fls(x) = 4
+ *
+ * To invalidate pages [7; 10], we need to invalidate [0; 15]:
+ * x = 0b0111 ^ 0b1010 = 0b1101
+ * span = 1 << fls(x) = 16
+ */
+ log2_span = fls_long(page_start ^ page_end);
+ span_mask = (1ULL << log2_span) - 1;
+
+ page_start &= ~span_mask;
+
+ cmd->atc.addr = page_start << inval_grain_shift;
+ cmd->atc.size = log2_span;
+}
+
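
As a quick check of the span calculation described in the comment above, here is a minimal user-space sketch (not part of the patch) that reproduces the same arithmetic for the [7; 10] example; fls_ul() is a local stand-in for the kernel's fls_long():

	#include <stdio.h>

	static int fls_ul(unsigned long x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned long page_start = 7, page_end = 10;	/* pages [7; 10] */
		int log2_span = fls_ul(page_start ^ page_end);
		unsigned long span_mask = (1UL << log2_span) - 1;

		page_start &= ~span_mask;
		/* prints "base page 0, span 16 pages", i.e. invalidate [0; 15] */
		printf("base page %lu, span %lu pages\n",
		       page_start, 1UL << log2_span);
		return 0;
	}
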
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ int i;
+
+ if (!master->ats_enabled)
+ return 0;
+
+ for (i = 0; i < master->num_sids; i++) {
+ cmd->atc.sid = master->sids[i];
+ arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+ }
+
+ return arm_smmu_cmdq_issue_sync(master->smmu);
+}
+
+static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+ int ssid, unsigned long iova, size_t size)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_master *master;
+
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
+ return 0;
+
+ arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head)
+ ret |= arm_smmu_atc_inv_master(master, &cmd);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
/* IO_PGTABLE API */
static void arm_smmu_tlb_sync(void *cookie)
{
@@ -1493,6 +1637,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
}
mutex_init(&smmu_domain->init_mutex);
+ INIT_LIST_HEAD(&smmu_domain->devices);
+ spin_lock_init(&smmu_domain->devices_lock);
+
return &smmu_domain->domain;
}
@@ -1688,55 +1835,97 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step;
}
-static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
{
int i, j;
- struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;
- for (i = 0; i < fwspec->num_ids; ++i) {
- u32 sid = fwspec->ids[i];
+ for (i = 0; i < master->num_sids; ++i) {
+ u32 sid = master->sids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
- if (fwspec->ids[j] == sid)
+ if (master->sids[j] == sid)
break;
if (j < i)
continue;
- arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+ arm_smmu_write_strtab_ent(master, sid, step);
}
}
-static void arm_smmu_detach_dev(struct device *dev)
+static int arm_smmu_enable_ats(struct arm_smmu_master *master)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_data *master = fwspec->iommu_priv;
+ int ret;
+ size_t stu;
+ struct pci_dev *pdev;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
- master->ste.assigned = false;
- arm_smmu_install_ste_for_dev(fwspec);
+ if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
+ !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
+ return -ENXIO;
+
+ pdev = to_pci_dev(master->dev);
+ if (pdev->untrusted)
+ return -EPERM;
+
+ /* Smallest Translation Unit: log2 of the smallest supported granule */
+ stu = __ffs(smmu->pgsize_bitmap);
+
+ ret = pci_enable_ats(pdev, stu);
+ if (ret)
+ return ret;
+
+ master->ats_enabled = true;
+ return 0;
+}
+
+static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+{
+ if (!master->ats_enabled || !dev_is_pci(master->dev))
+ return;
+
+ pci_disable_ats(to_pci_dev(master->dev));
+ master->ats_enabled = false;
+}
+
+static void arm_smmu_detach_dev(struct arm_smmu_master *master)
+{
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = master->domain;
+
+ if (!smmu_domain)
+ return;
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_del(&master->domain_head);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ master->domain = NULL;
+ arm_smmu_install_ste_for_dev(master);
+
+ /* Disabling ATS invalidates all ATC entries */
+ arm_smmu_disable_ats(master);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
+ unsigned long flags;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_master_data *master;
- struct arm_smmu_strtab_ent *ste;
+ struct arm_smmu_master *master;
if (!fwspec)
return -ENOENT;
master = fwspec->iommu_priv;
smmu = master->smmu;
- ste = &master->ste;
- /* Already attached to a different domain? */
- if (ste->assigned)
- arm_smmu_detach_dev(dev);
+ arm_smmu_detach_dev(master);
mutex_lock(&smmu_domain->init_mutex);
@@ -1756,21 +1945,19 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}
- ste->assigned = true;
+ master->domain = smmu_domain;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
- ste->s1_cfg = NULL;
- ste->s2_cfg = NULL;
- } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- ste->s1_cfg = &smmu_domain->s1_cfg;
- ste->s2_cfg = NULL;
- arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
- } else {
- ste->s1_cfg = NULL;
- ste->s2_cfg = &smmu_domain->s2_cfg;
- }
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_add(&master->domain_head, &smmu_domain->devices);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- arm_smmu_install_ste_for_dev(fwspec);
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
+ arm_smmu_enable_ats(master);
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
+
+ arm_smmu_install_ste_for_dev(master);
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1790,12 +1977,18 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
- struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ int ret;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
return 0;
- return ops->unmap(ops, iova, size);
+ ret = ops->unmap(ops, iova, size);
+ if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size))
+ return 0;
+
+ return ret;
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
@@ -1860,7 +2053,7 @@ static int arm_smmu_add_device(struct device *dev)
{
int i, ret;
struct arm_smmu_device *smmu;
- struct arm_smmu_master_data *master;
+ struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct iommu_group *group;
@@ -1882,13 +2075,16 @@ static int arm_smmu_add_device(struct device *dev)
if (!master)
return -ENOMEM;
+ master->dev = dev;
master->smmu = smmu;
+ master->sids = fwspec->ids;
+ master->num_sids = fwspec->num_ids;
fwspec->iommu_priv = master;
}
/* Check the SIDs are in range of the SMMU and our stream table */
- for (i = 0; i < fwspec->num_ids; i++) {
- u32 sid = fwspec->ids[i];
+ for (i = 0; i < master->num_sids; i++) {
+ u32 sid = master->sids[i];
if (!arm_smmu_sid_in_range(smmu, sid))
return -ERANGE;
@@ -1913,7 +2109,7 @@ static int arm_smmu_add_device(struct device *dev)
static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_data *master;
+ struct arm_smmu_master *master;
struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
@@ -1921,8 +2117,7 @@ static void arm_smmu_remove_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
- if (master && master->ste.assigned)
- arm_smmu_detach_dev(dev);
+ arm_smmu_detach_dev(master);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
kfree(master);
@@ -2454,13 +2649,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
if (reg & CR0_SMMUEN) {
- if (is_kdump_kernel()) {
- arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
- arm_smmu_device_disable(smmu);
- return -EBUSY;
- }
-
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ WARN_ON(is_kdump_kernel() && !disable_bypass);
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
}
ret = arm_smmu_device_disable(smmu);
@@ -2547,12 +2738,24 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
}
}
+ if (smmu->features & ARM_SMMU_FEAT_ATS) {
+ enables |= CR0_ATSCHK;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable ATS check\n");
+ return ret;
+ }
+ }
+
ret = arm_smmu_setup_irqs(smmu);
if (ret) {
dev_err(smmu->dev, "failed to setup irqs\n");
return ret;
}
+ if (is_kdump_kernel())
+ enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
/* Enable the SMMU interface, or ensure bypass */
if (!bypass || disable_bypass) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 045d93884164..5e54cc0a28b3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -110,7 +110,8 @@ static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
-static bool disable_bypass;
+static bool disable_bypass =
+ IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -569,12 +570,13 @@ static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
- u32 fsr, fsynr;
+ u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
void __iomem *cb_base;
cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -585,10 +587,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+ cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
dev_err_ratelimited(smmu->dev,
- "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
- fsr, iova, fsynr, cfg->cbndx);
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, cfg->cbndx);
writel(fsr, cb_base + ARM_SMMU_CB_FSR);
return IRQ_HANDLED;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 77aabe637a60..5e898047c390 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -206,12 +206,13 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
-static void iova_reserve_pci_windows(struct pci_dev *dev,
+static int iova_reserve_pci_windows(struct pci_dev *dev,
struct iova_domain *iovad)
{
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
struct resource_entry *window;
unsigned long lo, hi;
+ phys_addr_t start = 0, end;
resource_list_for_each_entry(window, &bridge->windows) {
if (resource_type(window->res) != IORESOURCE_MEM)
@@ -221,6 +222,31 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
hi = iova_pfn(iovad, window->res->end - window->offset);
reserve_iova(iovad, lo, hi);
}
+
+ /* Get reserved DMA windows from host bridge */
+ resource_list_for_each_entry(window, &bridge->dma_ranges) {
+ end = window->res->start - window->offset;
+resv_iova:
+ if (end > start) {
+ lo = iova_pfn(iovad, start);
+ hi = iova_pfn(iovad, end);
+ reserve_iova(iovad, lo, hi);
+ } else {
+ /* dma_ranges list should be sorted */
+ dev_err(&dev->dev, "Failed to reserve IOVA\n");
+ return -EINVAL;
+ }
+
+ start = window->res->end - window->offset + 1;
+ /* If window is last entry */
+ if (window->node.next == &bridge->dma_ranges &&
+ end != ~(dma_addr_t)0) {
+ end = ~(dma_addr_t)0;
+ goto resv_iova;
+ }
+ }
+
+ return 0;
}
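
The new dma_ranges walk above reserves every IOVA region that is not covered by a (sorted) bridge window, including the tail up to the top of the address space. A stand-alone sketch of that walk, using hypothetical window values rather than real resource entries:

	#include <stdio.h>

	struct win { unsigned long long start, end; };	/* inclusive, sorted */

	int main(void)
	{
		/* hypothetical host bridge dma_ranges */
		struct win w[] = { { 0x1000, 0x1fff }, { 0x8000, 0x8fff } };
		unsigned long long start = 0, top = ~0ULL;
		unsigned int i;

		for (i = 0; i < sizeof(w) / sizeof(w[0]); i++) {
			if (w[i].start > start)	/* gap below this window */
				printf("reserve [%#llx - %#llx]\n",
				       start, w[i].start - 1);
			start = w[i].end + 1;
		}
		if (start && start <= top)	/* tail above the last window */
			printf("reserve [%#llx - %#llx]\n", start, top);
		return 0;
	}
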
static int iova_reserve_iommu_regions(struct device *dev,
@@ -232,8 +258,11 @@ static int iova_reserve_iommu_regions(struct device *dev,
LIST_HEAD(resv_regions);
int ret = 0;
- if (dev_is_pci(dev))
- iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+ if (dev_is_pci(dev)) {
+ ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+ if (ret)
+ return ret;
+ }
iommu_get_resv_regions(dev, &resv_regions);
list_for_each_entry(region, &resv_regions, list) {
@@ -619,17 +648,7 @@ out_free_pages:
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
- unsigned long uaddr = vma->vm_start;
- unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- int ret = -ENXIO;
-
- for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
- ret = vm_insert_page(vma, uaddr, pages[i]);
- if (ret)
- break;
- uaddr += PAGE_SIZE;
- }
- return ret;
+ return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 9c49300e9fb7..6d969a172fbb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -145,7 +145,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(info->path[0]);
+ size = struct_size(info, path, level);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
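
For reference, struct_size() from <linux/overflow.h> computes the same flexible-array allocation size as the open-coded expression it replaces, but saturates instead of wrapping on overflow. A small sketch with a made-up structure (not the real dmar_pci_notify_info layout):

	#include <linux/overflow.h>
	#include <linux/types.h>

	struct example {
		int level;
		struct { u8 bus, device, function; } path[];	/* flexible array */
	};

	static size_t example_size(struct example *info, int level)
	{
		/* equivalent to: sizeof(*info) + level * sizeof(info->path[0]) */
		return struct_size(info, path, level);
	}
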
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 28cb713d728c..a209199f3af6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1391,7 +1391,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
/* pdev will be returned if device is not a vf */
pf_pdev = pci_physfn(pdev);
- info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ info->pfsid = pci_dev_id(pf_pdev);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -2341,32 +2341,33 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
-{
- int ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- /* Notify about the new mapping */
- if (domain_type_is_vm(domain)) {
- /* VM typed domains can have more than one IOMMUs */
- int iommu_id;
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
- } else {
- /* General domains only have one IOMMU */
- iommu = domain_get_iommu(domain);
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
+{
+ int ret;
+ struct intel_iommu *iommu;
+
+ /* Do the real mapping first */
+ ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ if (ret)
+ return ret;
+
+ /* Notify about the new mapping */
+ if (domain_type_is_vm(domain)) {
+ /* VM typed domains can have more than one IOMMUs */
+ int iommu_id;
+
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+ } else {
+ /* General domains only have one IOMMU */
+ iommu = domain_get_iommu(domain);
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
- return 0;
+ return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
@@ -2485,6 +2486,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->domain = domain;
info->iommu = iommu;
info->pasid_table = NULL;
+ info->auxd_enabled = 0;
+ INIT_LIST_HEAD(&info->auxiliary_domains);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -3412,9 +3415,12 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
+ dmar_map_gfx = 0;
#endif
+ if (!dmar_map_gfx)
+ iommu_identity_mapping |= IDENTMAP_GFX;
+
check_tylersburg_isoch();
if (iommu_identity_mapping) {
@@ -3496,7 +3502,13 @@ domains_done:
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+ /*
+ * Call dmar_alloc_hwirq() with dmar_global_lock held,
+ * could cause possible lock race condition.
+ */
+ up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
+ down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3606,45 +3618,40 @@ out:
}
/* Check if the dev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
+static bool iommu_need_mapping(struct device *dev)
{
int found;
if (iommu_dummy(dev))
- return 1;
+ return false;
if (!iommu_identity_mapping)
- return 0;
+ return true;
found = identity_mapping(dev);
if (found) {
if (iommu_should_identity_map(dev, 0))
- return 1;
- else {
- /*
- * 32 bit DMA is removed from si_domain and fall back
- * to non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- return 0;
- }
+ return false;
+
+ /*
+ * 32 bit DMA is removed from si_domain and fall back to
+ * non-identity mapping.
+ */
+ dmar_remove_one_dev_info(dev);
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
} else {
/*
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (iommu_should_identity_map(dev, 0)) {
- int ret;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret) {
- dev_info(dev, "64bit DMA uses identity mapping\n");
- return 1;
- }
+ if (iommu_should_identity_map(dev, 0) &&
+ !domain_add_dev_info(si_domain, dev)) {
+ dev_info(dev, "64bit DMA uses identity mapping\n");
+ return false;
}
}
- return 0;
+ return true;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
@@ -3660,9 +3667,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return paddr;
-
domain = get_valid_domain_for_dev(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3711,15 +3715,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, *dev->dma_mask);
+ if (iommu_need_mapping(dev))
+ return __intel_map_single(dev, page_to_phys(page) + offset,
+ size, dir, *dev->dma_mask);
+ return dma_direct_map_page(dev, page, offset, size, dir, attrs);
}
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+ if (iommu_need_mapping(dev))
+ return __intel_map_single(dev, phys_addr, size, dir,
+ *dev->dma_mask);
+ return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3730,9 +3739,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
unsigned long iova_pfn;
struct intel_iommu *iommu;
struct page *freelist;
-
- if (iommu_no_mapping(dev))
- return;
+ struct pci_dev *pdev = NULL;
domain = find_domain(dev);
BUG_ON(!domain);
@@ -3745,11 +3752,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
start_pfn = mm_to_dma_pfn(iova_pfn);
last_pfn = start_pfn + nrpages - 1;
+ if (dev_is_pci(dev))
+ pdev = to_pci_dev(dev);
+
dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict) {
+ if (intel_iommu_strict || (pdev && pdev->untrusted)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
nrpages, !freelist, 0);
/* free iova */
@@ -3769,7 +3779,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- intel_unmap(dev, dev_addr, size);
+ if (iommu_need_mapping(dev))
+ intel_unmap(dev, dev_addr, size);
+ else
+ dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (iommu_need_mapping(dev))
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3779,28 +3799,17 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
+ if (!iommu_need_mapping(dev))
+ return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
size = PAGE_ALIGN(size);
order = get_order(size);
- if (!iommu_no_mapping(dev))
- flags &= ~(GFP_DMA | GFP_DMA32);
- else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- else
- flags |= GFP_DMA32;
- }
-
if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;
page = dma_alloc_from_contiguous(dev, count, order,
flags & __GFP_NOWARN);
- if (page && iommu_no_mapping(dev) &&
- page_to_phys(page) + size > dev->coherent_dma_mask) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
}
if (!page)
@@ -3826,6 +3835,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3843,6 +3855,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
+ if (!iommu_need_mapping(dev))
+ return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
for_each_sg(sglist, sg, nelems, i) {
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
}
@@ -3850,20 +3865,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
-static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sglist, int nelems, int dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i) {
- BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nelems;
-}
-
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, unsigned long attrs)
{
@@ -3878,8 +3879,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = get_valid_domain_for_dev(dev);
if (!domain)
@@ -3929,7 +3930,7 @@ static const struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.map_resource = intel_map_resource,
- .unmap_resource = intel_unmap_page,
+ .unmap_resource = intel_unmap_resource,
.dma_supported = dma_direct_supported,
};
@@ -4055,9 +4056,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
- if (dmar_map_gfx) {
- intel_iommu_gfx_mapped = 1;
- } else {
+ if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
@@ -4086,7 +4085,7 @@ static int init_iommu_hw(void)
iommu_disable_protect_mem_regions(iommu);
continue;
}
-
+
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@@ -4896,6 +4895,9 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
+ if (dmar_map_gfx)
+ intel_iommu_gfx_mapped = 1;
+
init_no_remapping_devices();
ret = init_dmars();
@@ -5065,35 +5067,139 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
domain_exit(to_dmar_domain(domain));
}
-static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+/*
+ * Check whether a @domain could be attached to the @dev through the
+ * aux-domain attach/detach APIs.
+ */
+static inline bool
+is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
- int addr_width;
- u8 bus, devfn;
+ struct device_domain_info *info = dev->archdata.iommu;
- if (device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
+ return info && info->auxd_enabled &&
+ domain->type == IOMMU_DOMAIN_UNMANAGED;
+}
- /* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
+static void auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
- old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
- dmar_remove_one_dev_info(dev);
- rcu_read_unlock();
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
+ domain->auxd_refcnt++;
+ list_add(&domain->auxd, &info->auxiliary_domains);
+}
+
+static void auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ list_del(&domain->auxd);
+ domain->auxd_refcnt--;
+
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+}
+
+static int aux_domain_add_dev(struct dmar_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ u8 bus, devfn;
+ unsigned long flags;
+ struct intel_iommu *iommu;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (domain->default_pasid <= 0) {
+ int pasid;
+
+ pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+ pci_max_pasids(to_pci_dev(dev)),
+ GFP_KERNEL);
+ if (pasid <= 0) {
+ pr_err("Can't allocate default pasid\n");
+ return -ENODEV;
}
+ domain->default_pasid = pasid;
}
+ spin_lock_irqsave(&device_domain_lock, flags);
+ /*
+ * iommu->lock must be held to attach domain to iommu and setup the
+ * pasid entry for second level translation.
+ */
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ if (ret)
+ goto attach_failed;
+
+ /* Setup the PASID entry for mediated devices: */
+ ret = intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid);
+ if (ret)
+ goto table_failed;
+ spin_unlock(&iommu->lock);
+
+ auxiliary_link_device(domain, dev);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+
+table_failed:
+ domain_detach_iommu(domain, iommu);
+attach_failed:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+
+ return ret;
+}
+
+static void aux_domain_remove_dev(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+
+ if (!is_aux_domain(dev, &domain->domain))
+ return;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ iommu = info->iommu;
+
+ auxiliary_unlink_device(domain, dev);
+
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static int prepare_domain_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu;
+ int addr_width;
+ u8 bus, devfn;
+
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
@@ -5126,7 +5232,58 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev);
+ return 0;
+}
+
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret;
+
+ if (device_is_rmrr_locked(dev)) {
+ dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
+ return -EPERM;
+ }
+
+ if (is_aux_domain(dev, domain))
+ return -EPERM;
+
+ /* normally dev is not mapped */
+ if (unlikely(domain_context_mapped(dev))) {
+ struct dmar_domain *old_domain;
+
+ old_domain = find_domain(dev);
+ if (old_domain) {
+ rcu_read_lock();
+ dmar_remove_one_dev_info(dev);
+ rcu_read_unlock();
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
+ }
+ }
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return domain_add_dev_info(to_dmar_domain(domain), dev);
+}
+
+static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret;
+
+ if (!is_aux_domain(dev, domain))
+ return -EPERM;
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return aux_domain_add_dev(to_dmar_domain(domain), dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
@@ -5135,6 +5292,12 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
dmar_remove_one_dev_info(dev);
}
+static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ aux_domain_remove_dev(to_dmar_domain(domain), dev);
+}
+
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot)
@@ -5223,6 +5386,42 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static inline bool scalable_mode_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!sm_supported(iommu)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool iommu_pasid_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!pasid_supported(iommu)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5307,8 +5506,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
}
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
struct device_domain_info *info;
struct context_entry *context;
@@ -5317,7 +5515,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
u64 ctx_lo;
int ret;
- domain = get_valid_domain_for_dev(sdev->dev);
+ domain = get_valid_domain_for_dev(dev);
if (!domain)
return -EINVAL;
@@ -5325,7 +5523,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
spin_lock(&iommu->lock);
ret = -EINVAL;
- info = sdev->dev->archdata.iommu;
+ info = dev->archdata.iommu;
if (!info || !info->pasid_supported)
goto out;
@@ -5335,14 +5533,13 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
ctx_lo = context[0].lo;
- sdev->did = FLPT_DEFAULT_DID;
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
-
if (!(ctx_lo & CONTEXT_PASIDE)) {
ctx_lo |= CONTEXT_PASIDE;
context[0].lo = ctx_lo;
wmb();
- iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
+ iommu->flush.flush_context(iommu,
+ domain->iommu_did[iommu->seq_id],
+ PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
}
@@ -5351,12 +5548,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
if (!info->pasid_enabled)
iommu_enable_dev_iotlb(info);
- if (info->ats_enabled) {
- sdev->dev_iotlb = 1;
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
- }
ret = 0;
out:
@@ -5366,6 +5557,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
return ret;
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
struct intel_iommu *iommu;
@@ -5387,12 +5579,142 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
+static int intel_iommu_enable_auxd(struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ u8 bus, devfn;
+ int ret;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu || dmar_disabled)
+ return -EINVAL;
+
+ if (!sm_supported(iommu) || !pasid_supported(iommu))
+ return -EINVAL;
+
+ ret = intel_iommu_enable_pasid(iommu, dev);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ info->auxd_enabled = 1;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+static int intel_iommu_disable_auxd(struct device *dev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ if (!WARN_ON(!info))
+ info->auxd_enabled = 0;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+/*
+ * A PCI Express Designated Vendor-Specific Extended Capability is defined
+ * in section 3.7 of the Intel Scalable I/O Virtualization technical spec,
+ * allowing system software and tools to detect endpoint devices that
+ * support Intel Scalable I/O Virtualization without a host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space or 0 if the device does not support
+ * it.
+ */
+static int siov_find_pci_dvsec(struct pci_dev *pdev)
+{
+ int pos;
+ u16 vendor, id;
+
+ pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+ while (pos) {
+ pci_read_config_word(pdev, pos + 4, &vendor);
+ pci_read_config_word(pdev, pos + 8, &id);
+ if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+ return pos;
+
+ pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+ }
+
+ return 0;
+}
+
+static bool
+intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX) {
+ int ret;
+
+ if (!dev_is_pci(dev) || dmar_disabled ||
+ !scalable_mode_support() || !iommu_pasid_support())
+ return false;
+
+ ret = pci_pasid_features(to_pci_dev(dev));
+ if (ret < 0)
+ return false;
+
+ return !!siov_find_pci_dvsec(to_pci_dev(dev));
+ }
+
+ return false;
+}
+
+static int
+intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_enable_auxd(dev);
+
+ return -ENODEV;
+}
+
+static int
+intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_disable_auxd(dev);
+
+ return -ENODEV;
+}
+
+static bool
+intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return scalable_mode_support() && info && info->auxd_enabled;
+
+ return false;
+}
+
+static int
+intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ return dmar_domain->default_pasid > 0 ?
+ dmar_domain->default_pasid : -EINVAL;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
+ .aux_attach_dev = intel_iommu_aux_attach_device,
+ .aux_detach_dev = intel_iommu_aux_detach_device,
+ .aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
@@ -5401,6 +5723,10 @@ const struct iommu_ops intel_iommu_ops = {
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
.device_group = pci_device_group,
+ .dev_has_feat = intel_iommu_dev_has_feat,
+ .dev_feat_enabled = intel_iommu_dev_feat_enabled,
+ .dev_enable_feat = intel_iommu_dev_enable_feat,
+ .dev_disable_feat = intel_iommu_dev_disable_feat,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 03b12d2ee213..2fefeafda437 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -154,8 +154,10 @@ int intel_pasid_alloc_table(struct device *dev)
order = size ? get_order(size) : 0;
pages = alloc_pages_node(info->iommu->node,
GFP_KERNEL | __GFP_ZERO, order);
- if (!pages)
+ if (!pages) {
+ kfree(pasid_table);
return -ENOMEM;
+ }
pasid_table->table = page_address(pages);
pasid_table->order = order;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 3a4b09ae8561..8f87304f915c 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -228,6 +228,7 @@ static LIST_HEAD(global_svm_list);
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
struct mm_struct *mm = NULL;
@@ -291,13 +292,29 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
sdev->dev = dev;
- ret = intel_iommu_enable_pasid(iommu, sdev);
+ ret = intel_iommu_enable_pasid(iommu, dev);
if (ret || !pasid) {
/* If they don't actually want to assign a PASID, this is
* just an enabling check/preparation. */
kfree(sdev);
goto out;
}
+
+ info = dev->archdata.iommu;
+ if (!info || !info->pasid_supported) {
+ kfree(sdev);
+ goto out;
+ }
+
+ sdev->did = FLPT_DEFAULT_DID;
+ sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ if (info->ats_enabled) {
+ sdev->dev_iotlb = 1;
+ sdev->qdep = info->ats_qdep;
+ if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
+ sdev->qdep = 0;
+ }
+
/* Finish the setup now we know we're keeping it */
sdev->users = 1;
sdev->ops = ops;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 2d74641b7f7b..4160aa9f3f80 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -424,7 +424,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
else
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- PCI_DEVID(dev->bus->number, dev->devfn));
+ pci_dev_id(dev));
return 0;
}
@@ -548,8 +548,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
goto out_free_table;
}
- bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
- sizeof(long), GFP_ATOMIC);
+ bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
goto out_free_pages;
@@ -616,7 +615,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
return 0;
out_free_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
out_free_pages:
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
@@ -640,7 +639,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
}
free_pages((unsigned long)iommu->ir_table->base,
INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table->bitmap);
+ bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 109de67d5d72..67ee6623f9b2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -45,10 +45,6 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
static bool iommu_dma_strict __read_mostly = true;
-struct iommu_callback_data {
- const struct iommu_ops *ops;
-};
-
struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
@@ -1217,9 +1213,6 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
struct notifier_block *nb;
- struct iommu_callback_data cb = {
- .ops = ops,
- };
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
@@ -1231,7 +1224,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
if (err)
goto out_free;
- err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+ err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
if (err)
goto out_err;
@@ -1240,7 +1233,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
out_err:
/* Clean up */
- bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+ bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
bus_unregister_notifier(bus, nb);
out_free:
@@ -2039,3 +2032,203 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Per device IOMMU features.
+ */
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_has_feat)
+ return ops->dev_has_feat(dev, feat);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_enable_feat)
+ return ops->dev_enable_feat(dev, feat);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+
+/*
+ * The device drivers should do the necessary cleanups before calling this.
+ * For example, before disabling the aux-domain feature, the device driver
+ * should detach all aux-domains. Otherwise, this will return -EBUSY.
+ */
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_disable_feat)
+ return ops->dev_disable_feat(dev, feat);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_feat_enabled)
+ return ops->dev_feat_enabled(dev, feat);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
+
+/*
+ * Aux-domain specific attach/detach.
+ *
+ * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
+ * true. Also, as long as domains are attached to a device through this
+ * interface, any attempt to call iommu_attach_device() should fail
+ * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
+ * This should make us safe against a device being attached to a guest as a
+ * whole while there are still pasid users on it (aux and sva).
+ */
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = -ENODEV;
+
+ if (domain->ops->aux_attach_dev)
+ ret = domain->ops->aux_attach_dev(domain, dev);
+
+ if (!ret)
+ trace_attach_device_to_domain(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
+
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+ if (domain->ops->aux_detach_dev) {
+ domain->ops->aux_detach_dev(domain, dev);
+ trace_detach_device_from_domain(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
+
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = -ENODEV;
+
+ if (domain->ops->aux_get_pasid)
+ ret = domain->ops->aux_get_pasid(domain, dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to it
+ *
+ * Create a bond between device and address space, allowing the device to access
+ * the mm using the returned PASID. If a bond already exists between @dev and
+ * @mm, it is returned and an additional reference is taken. Caller must call
+ * iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ struct iommu_group *group;
+ struct iommu_sva *handle = ERR_PTR(-EINVAL);
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_bind)
+ return ERR_PTR(-ENODEV);
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ /* Ensure device count and domain don't change while we're binding */
+ mutex_lock(&group->mutex);
+
+ /*
+ * To keep things simple, SVA currently doesn't support IOMMU groups
+ * with more than one device. Existing SVA-capable systems are not
+ * affected by the problems that required IOMMU groups (lack of ACS
+ * isolation, device ID aliasing and other hardware issues).
+ */
+ if (iommu_group_device_count(group) != 1)
+ goto out_unlock;
+
+ handle = ops->sva_bind(dev, mm, drvdata);
+
+out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transactions for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+ struct iommu_group *group;
+ struct device *dev = handle->dev;
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_unbind)
+ return;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return;
+
+ mutex_lock(&group->mutex);
+ ops->sva_unbind(handle);
+ mutex_unlock(&group->mutex);
+
+ iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+int iommu_sva_set_ops(struct iommu_sva *handle,
+ const struct iommu_sva_ops *sva_ops)
+{
+ if (handle->ops && handle->ops != sva_ops)
+ return -EEXIST;
+
+ handle->ops = sva_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
+
+int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_get_pasid)
+ return IOMMU_PASID_INVALID;
+
+ return ops->sva_get_pasid(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
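To make the new exports easier to follow, here is a minimal sketch of how a driver might consume the SVA interface added above; the device pointer, error handling and placement in a driver are assumptions, not taken from any in-tree user.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int example_bind_current_mm(struct device *dev)
{
	struct iommu_sva *handle;
	int pasid;

	/* The SVA feature must be enabled before binding (see kerneldoc above) */
	if (!iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* ... program the device to tag its DMA with this PASID ... */

	iommu_sva_unbind_device(handle);
	return 0;
}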
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 9fb0eb7a4d02..b4b87d6ae67f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -34,7 +34,7 @@
#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index de3e02277b70..b66d11b0286e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -632,16 +632,20 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (!larbnode)
return -EINVAL;
- if (!of_device_is_available(larbnode))
+ if (!of_device_is_available(larbnode)) {
+ of_node_put(larbnode);
continue;
+ }
ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
if (ret)/* The id is consecutive if the property is absent */
id = i;
plarbdev = of_find_device_by_node(larbnode);
- if (!plarbdev)
+ if (!plarbdev) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
+ }
data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
component_match_add_release(dev, &match, release_of,
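The two mtk_iommu hunks above plug a device-node reference leak; a generic sketch of the rule they enforce follows (the property name is illustrative, not from the driver):

#include <linux/errno.h>
#include <linux/of.h>

static int example_scan_phandles(struct device_node *np, int count)
{
	struct device_node *node;
	int i;

	for (i = 0; i < count; i++) {
		/* of_parse_phandle() returns the node with an elevated refcount */
		node = of_parse_phandle(np, "example,larbs", i);
		if (!node)
			return -EINVAL;

		if (!of_device_is_available(node)) {
			of_node_put(node);	/* drop the reference on every early exit */
			continue;
		}

		/* ... use the node ... */

		of_node_put(node);		/* and on the normal path as well */
	}

	return 0;
}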
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 5182c7d6171e..463ee08f7d3a 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
@@ -146,8 +145,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
-#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
- SMMU_PTE_NONSECURE)
static unsigned int iova_pd_index(unsigned long iova)
{
@@ -205,8 +202,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -216,8 +217,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_SECTION(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -227,8 +232,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_GROUP(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -316,6 +325,9 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
/* TODO: free page directory and page tables */
+ WARN_ON_ONCE(as->use_count);
+ kfree(as->count);
+ kfree(as->pts);
kfree(as);
}
@@ -645,6 +657,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
+ u32 pte_attrs;
u32 *pte;
pte = as_get_pte(as, iova, &pte_dma);
@@ -655,8 +668,16 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (*pte == 0)
tegra_smmu_pte_get_use(as, iova);
+ pte_attrs = SMMU_PTE_NONSECURE;
+
+ if (prot & IOMMU_READ)
+ pte_attrs |= SMMU_PTE_READABLE;
+
+ if (prot & IOMMU_WRITE)
+ pte_attrs |= SMMU_PTE_WRITABLE;
+
tegra_smmu_set_pte(as, iova, pte, pte_dma,
- __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
+ __phys_to_pfn(paddr) | pte_attrs);
return 0;
}
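The tegra_smmu_map() hunk above derives the PTE attributes from the prot argument instead of the removed SMMU_PTE_ATTR constant. A standalone sketch of that translation follows; the bit positions are assumed for illustration and the real values stay in tegra-smmu.c.

#include <linux/bits.h>
#include <linux/iommu.h>
#include <linux/types.h>

/* Bit positions assumed for illustration only */
#define EXAMPLE_PTE_READABLE	BIT(31)
#define EXAMPLE_PTE_WRITABLE	BIT(30)
#define EXAMPLE_PTE_NONSECURE	BIT(29)

static u32 example_prot_to_pte_attrs(int prot)
{
	u32 pte_attrs = EXAMPLE_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= EXAMPLE_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= EXAMPLE_PTE_WRITABLE;

	return pte_attrs;
}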
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 5438abb1baba..cf7984991062 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -160,6 +160,12 @@ config IMGPDC_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config IXP4XX_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_MULTI_HANDLER
+ select SPARSE_IRQ
+
config MADERA_IRQ
tristate
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 85972ae1bd7f..f8c66e958a64 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
+obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
new file mode 100644
index 000000000000..d576809429ac
--- /dev/null
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the IXP4xx interrupt controller
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-ixp4xx.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IXP4XX_ICPR 0x00 /* Interrupt Status */
+#define IXP4XX_ICMR 0x04 /* Interrupt Enable */
+#define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */
+#define IXP4XX_ICIP 0x0C /* IRQ Status */
+#define IXP4XX_ICFP 0x10 /* FIQ Status */
+#define IXP4XX_ICHR 0x14 /* Interrupt Priority */
+#define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */
+#define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */
+
+/* IXP43x and IXP46x-only */
+#define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */
+#define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */
+#define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */
+#define IXP4XX_ICIP2 0x2C /* IRQ Status */
+#define IXP4XX_ICFP2 0x30 /* FIQ Status */
+#define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */
+
+/**
+ * struct ixp4xx_irq - state container for the IXP4xx IRQ controller
+ * @irqbase: IRQ controller memory base in virtual memory
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC (with 64 IRQs)
+ * @irqchip: irqchip for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ixp4xx_irq {
+ void __iomem *irqbase;
+ bool is_356;
+ struct irq_chip irqchip;
+ struct irq_domain *domain;
+};
+
+/* Local static state container */
+static struct ixp4xx_irq ixirq;
+
+/* GPIO Clocks */
+#define IXP4XX_GPIO_CLK_0 14
+#define IXP4XX_GPIO_CLK_1 15
+
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ /* All are level active high (asserted) here */
+ if (type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ return 0;
+}
+
+static void ixp4xx_irq_mask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val &= ~BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val &= ~BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+/*
+ * Level triggered interrupts on GPIO lines can only be cleared when the
+ * interrupt condition disappears.
+ */
+static void ixp4xx_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val |= BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ unsigned long status;
+ int i;
+
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i, regs);
+
+ /*
+ * IXP465/IXP435 has an upper IRQ status register
+ */
+ if (ixi->is_356) {
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i + 32, regs);
+ }
+}
+
+static int ixp4xx_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ixp4xx_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_irq *ixi = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ /*
+ * TODO: after converting IXP4xx to only device tree, set
+ * handle_bad_irq as default handler and assume all consumers
+ * call .set_type() as this is provided in the second cell in
+ * the device tree phandle.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixi->irqchip,
+ ixi,
+ handle_level_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * This needs to be a hierarchical irqdomain to work well with the
+ * GPIO irqchip (which is lower in the hierarchy)
+ */
+static const struct irq_domain_ops ixp4xx_irqdomain_ops = {
+ .translate = ixp4xx_irq_domain_translate,
+ .alloc = ixp4xx_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+/**
+ * ixp4xx_get_irq_domain() - retrieve the ixp4xx irq domain
+ *
+ * This function will go away when we transition to DT probing.
+ */
+struct irq_domain *ixp4xx_get_irq_domain(void)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+
+ return ixi->domain;
+}
+EXPORT_SYMBOL_GPL(ixp4xx_get_irq_domain);
+
+/*
+ * This is the Linux IRQ to hwirq mapping table. It goes away once we
+ * have DT support, as all IRQ resources are then defined in the device
+ * tree. It registers all the IRQs that are not used by the hierarchical
+ * GPIO IRQ chip. The "holes" in between these IRQs will be requested by
+ * the GPIO driver. This is a stop-gap solution.
+ */
+struct ixp4xx_irq_chunk {
+ int irq;
+ int hwirq;
+ int nr_irqs;
+};
+
+static const struct ixp4xx_irq_chunk ixp4xx_irq_chunks[] = {
+ {
+ .irq = 16,
+ .hwirq = 0,
+ .nr_irqs = 6,
+ },
+ {
+ .irq = 24,
+ .hwirq = 8,
+ .nr_irqs = 11,
+ },
+ {
+ .irq = 46,
+ .hwirq = 30,
+ .nr_irqs = 2,
+ },
+ /* Only on the 436 variants */
+ {
+ .irq = 48,
+ .hwirq = 32,
+ .nr_irqs = 10,
+ },
+};
+
+/**
+ * ixp4xx_irq_setup() - Common setup code for the IXP4xx interrupt controller
+ * @ixi: State container
+ * @irqbase: Virtual memory base for the interrupt controller
+ * @fwnode: Corresponding fwnode abstraction for this controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+static int ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
+ void __iomem *irqbase,
+ struct fwnode_handle *fwnode,
+ bool is_356)
+{
+ int nr_irqs;
+
+ ixi->irqbase = irqbase;
+ ixi->is_356 = is_356;
+
+ /* Route all sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR);
+
+ /* Disable all interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR);
+
+ if (is_356) {
+ /* Route upper 32 sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2);
+
+ /* Disable upper 32 interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2);
+
+ nr_irqs = 64;
+ } else {
+ nr_irqs = 32;
+ }
+
+ ixi->irqchip.name = "IXP4xx";
+ ixi->irqchip.irq_mask = ixp4xx_irq_mask;
+ ixi->irqchip.irq_unmask = ixp4xx_irq_unmask;
+ ixi->irqchip.irq_set_type = ixp4xx_set_irq_type;
+
+ ixi->domain = irq_domain_create_linear(fwnode, nr_irqs,
+ &ixp4xx_irqdomain_ops,
+ ixi);
+ if (!ixi->domain) {
+ pr_crit("IXP4XX: can not add primary irqdomain\n");
+ return -ENODEV;
+ }
+
+ set_handle_irq(ixp4xx_handle_irq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_irq_init() - Function to initialize the irqchip from boardfiles
+ * @irqbase: physical base for the irq controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+void __init ixp4xx_irq_init(resource_size_t irqbase,
+ bool is_356)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ struct irq_fwspec fwspec;
+ int nr_chunks;
+ int ret;
+ int i;
+
+ base = ioremap(irqbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return;
+ }
+ fwnode = irq_domain_alloc_fwnode(base);
+ if (!fwnode) {
+ pr_crit("IXP4XX: no domain handle\n");
+ return;
+ }
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret) {
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+ irq_domain_free_fwnode(fwnode);
+ }
+
+ nr_chunks = ARRAY_SIZE(ixp4xx_irq_chunks);
+ if (!is_356)
+ nr_chunks--;
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ for (i = 0; i < nr_chunks; i++) {
+ const struct ixp4xx_irq_chunk *chunk = &ixp4xx_irq_chunks[i];
+
+ pr_info("Allocate Linux IRQs %d..%d HW IRQs %d..%d\n",
+ chunk->irq, chunk->irq + chunk->nr_irqs - 1,
+ chunk->hwirq, chunk->hwirq + chunk->nr_irqs - 1);
+ fwspec.fwnode = fwnode;
+ fwspec.param[0] = chunk->hwirq;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(ixi->domain,
+ chunk->irq,
+ chunk->nr_irqs,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ pr_crit("IXP4XX: can not allocate irqs in hierarchy %d\n",
+ ret);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(ixp4xx_irq_init);
+
+#ifdef CONFIG_OF
+int __init ixp4xx_of_init_irq(struct device_node *np,
+ struct device_node *parent)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ bool is_356;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return -ENODEV;
+ }
+ fwnode = of_node_to_fwnode(np);
+
+ /* These chip variants have 64 interrupts */
+ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp45x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp46x-interrupt");
+
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret)
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+
+ return ret;
+}
+IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt",
+ ixp4xx_of_init_irq);
+#endif
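A hedged sketch of how a board file could use the interface added above, with a placeholder physical base address and hardware IRQ number; the real values belong to the machine code, not to this patch.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip/irq-ixp4xx.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

#define EXAMPLE_INTC_BASE_PHYS	0xc8003000	/* placeholder address */

static void __init example_board_init_irq(void)
{
	struct irq_domain *domain;
	int virq;

	/* false: plain IXP42x, only the lower 32 interrupt sources exist */
	ixp4xx_irq_init(EXAMPLE_INTC_BASE_PHYS, false);

	domain = ixp4xx_get_irq_domain();
	if (!domain)
		return;

	/* Hardware IRQ 5 falls inside the first pre-allocated chunk (hwirq 0..5) */
	virq = irq_find_mapping(domain, 5);
	if (virq)
		pr_info("hwirq 5 maps to Linux IRQ %d\n", virq);
}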
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index f3000ccb8d35..71be87bdb926 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -619,6 +619,12 @@ config LEDS_TLC591XX
This option enables support for Texas Instruments TLC59108
and TLC59116 LED controllers.
+config LEDS_MAX77650
+ tristate "LED support for Maxim MAX77650 PMIC"
+ depends on LEDS_CLASS && MFD_MAX77650
+ help
+ LED driver for the MAX77650 family of PMICs from Maxim Integrated.
+
config LEDS_MAX77693
tristate "LED support for MAX77693 Flash"
depends on LEDS_CLASS_FLASH
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 7a8b1f55d459..1e9702ebffee 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
+obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
new file mode 100644
index 000000000000..6b74ce9cac12
--- /dev/null
+++ b/drivers/leds/leds-max77650.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// LED driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_LED_NUM_LEDS 3
+
+#define MAX77650_LED_A_BASE 0x40
+#define MAX77650_LED_B_BASE 0x43
+
+#define MAX77650_LED_BR_MASK GENMASK(4, 0)
+#define MAX77650_LED_EN_MASK GENMASK(7, 6)
+
+#define MAX77650_LED_MAX_BRIGHTNESS MAX77650_LED_BR_MASK
+
+/* Enable EN_LED_MSTR. */
+#define MAX77650_LED_TOP_DEFAULT BIT(0)
+
+#define MAX77650_LED_ENABLE GENMASK(7, 6)
+#define MAX77650_LED_DISABLE 0x00
+
+#define MAX77650_LED_A_DEFAULT MAX77650_LED_DISABLE
+/* 100% on duty */
+#define MAX77650_LED_B_DEFAULT GENMASK(3, 0)
+
+struct max77650_led {
+ struct led_classdev cdev;
+ struct regmap *map;
+ unsigned int regA;
+ unsigned int regB;
+};
+
+static struct max77650_led *max77650_to_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct max77650_led, cdev);
+}
+
+static int max77650_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct max77650_led *led = max77650_to_led(cdev);
+ int val, mask;
+
+ mask = MAX77650_LED_BR_MASK | MAX77650_LED_EN_MASK;
+
+ if (brightness == LED_OFF)
+ val = MAX77650_LED_DISABLE;
+ else
+ val = MAX77650_LED_ENABLE | brightness;
+
+ return regmap_update_bits(led->map, led->regA, mask, val);
+}
+
+static int max77650_led_probe(struct platform_device *pdev)
+{
+ struct device_node *of_node, *child;
+ struct max77650_led *leds, *led;
+ struct device *parent;
+ struct device *dev;
+ struct regmap *map;
+ const char *label;
+ int rv, num_leds;
+ u32 reg;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+ of_node = dev->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ leds = devm_kcalloc(dev, sizeof(*leds),
+ MAX77650_LED_NUM_LEDS, GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ map = dev_get_regmap(dev->parent, NULL);
+ if (!map)
+ return -ENODEV;
+
+ num_leds = of_get_child_count(of_node);
+ if (!num_leds || num_leds > MAX77650_LED_NUM_LEDS)
+ return -ENODEV;
+
+ for_each_child_of_node(of_node, child) {
+ rv = of_property_read_u32(child, "reg", &reg);
+ if (rv || reg >= MAX77650_LED_NUM_LEDS)
+ return -EINVAL;
+
+ led = &leds[reg];
+ led->map = map;
+ led->regA = MAX77650_LED_A_BASE + reg;
+ led->regB = MAX77650_LED_B_BASE + reg;
+ led->cdev.brightness_set_blocking = max77650_led_brightness_set;
+ led->cdev.max_brightness = MAX77650_LED_MAX_BRIGHTNESS;
+
+ label = of_get_property(child, "label", NULL);
+ if (!label) {
+ led->cdev.name = "max77650::";
+ } else {
+ led->cdev.name = devm_kasprintf(dev, GFP_KERNEL,
+ "max77650:%s", label);
+ if (!led->cdev.name)
+ return -ENOMEM;
+ }
+
+ of_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
+
+ rv = devm_of_led_classdev_register(dev, child, &led->cdev);
+ if (rv)
+ return rv;
+
+ rv = regmap_write(map, led->regA, MAX77650_LED_A_DEFAULT);
+ if (rv)
+ return rv;
+
+ rv = regmap_write(map, led->regB, MAX77650_LED_B_DEFAULT);
+ if (rv)
+ return rv;
+ }
+
+ return regmap_write(map,
+ MAX77650_REG_CNFG_LED_TOP,
+ MAX77650_LED_TOP_DEFAULT);
+}
+
+static struct platform_driver max77650_led_driver = {
+ .driver = {
+ .name = "max77650-led",
+ },
+ .probe = max77650_led_probe,
+};
+module_platform_driver(max77650_led_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 LED driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
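To clarify the brightness handler above, here is a small sketch of the register value it computes for a given brightness; the EXAMPLE_* names mirror the masks defined in the driver and are only for illustration.

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_LED_BR_MASK	GENMASK(4, 0)	/* 5-bit current setting */
#define EXAMPLE_LED_EN_MASK	GENMASK(7, 6)	/* enable field */

/* Value written to the LEDx_A register for a given brightness */
static u8 example_led_reg_value(unsigned int brightness)
{
	if (brightness == 0)
		return 0x00;	/* LED fully disabled */

	/* enable bits set, brightness clamped to the 5-bit field */
	return EXAMPLE_LED_EN_MASK | (brightness & EXAMPLE_LED_BR_MASK);
}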
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 5f82036fe322..0df7454832ef 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,6 +45,8 @@ struct nvm_dev_map {
int num_ch;
};
+static void nvm_free(struct kref *ref);
+
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -325,6 +327,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
struct nvm_target *t;
struct nvm_tgt_dev *tgt_dev;
void *targetdata;
+ unsigned int mdts;
int ret;
switch (create->conf.type) {
@@ -412,8 +415,12 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->private_data = targetdata;
tqueue->queuedata = targetdata;
- blk_queue_max_hw_sectors(tqueue,
- (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+ mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
+ if (dev->geo.mdts) {
+ mdts = min_t(u32, dev->geo.mdts,
+ (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+ }
+ blk_queue_max_hw_sectors(tqueue, mdts);
set_capacity(tdisk, tt->capacity(targetdata));
add_disk(tdisk);
@@ -476,7 +483,6 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
/**
* nvm_remove_tgt - Removes a target from the media manager
- * @dev: device
* @remove: ioctl structure with target name to remove.
*
* Returns:
@@ -484,18 +490,28 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
* 1: on not found
* <0: on error
*/
-static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
struct nvm_target *t;
+ struct nvm_dev *dev;
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, remove->tgtname);
- if (!t) {
+ down_read(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ t = nvm_find_target(dev, remove->tgtname);
+ if (t) {
+ mutex_unlock(&dev->mlock);
+ break;
+ }
mutex_unlock(&dev->mlock);
- return 1;
}
+ up_read(&nvm_lock);
+
+ if (!t)
+ return 1;
+
__nvm_remove_target(t, true);
- mutex_unlock(&dev->mlock);
+ kref_put(&dev->ref, nvm_free);
return 0;
}
@@ -1089,15 +1105,16 @@ err_fmtype:
return ret;
}
-static void nvm_free(struct nvm_dev *dev)
+static void nvm_free(struct kref *ref)
{
- if (!dev)
- return;
+ struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
if (dev->dma_pool)
dev->ops->destroy_dma_pool(dev->dma_pool);
- nvm_unregister_map(dev);
+ if (dev->rmap)
+ nvm_unregister_map(dev);
+
kfree(dev->lun_map);
kfree(dev);
}
@@ -1134,7 +1151,13 @@ err:
struct nvm_dev *nvm_alloc_dev(int node)
{
- return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+ struct nvm_dev *dev;
+
+ dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+ if (dev)
+ kref_init(&dev->ref);
+
+ return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);
@@ -1142,12 +1165,16 @@ int nvm_register(struct nvm_dev *dev)
{
int ret, exp_pool_size;
- if (!dev->q || !dev->ops)
+ if (!dev->q || !dev->ops) {
+ kref_put(&dev->ref, nvm_free);
return -EINVAL;
+ }
ret = nvm_init(dev);
- if (ret)
+ if (ret) {
+ kref_put(&dev->ref, nvm_free);
return ret;
+ }
exp_pool_size = max_t(int, PAGE_SIZE,
(NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
@@ -1157,7 +1184,7 @@ int nvm_register(struct nvm_dev *dev)
exp_pool_size);
if (!dev->dma_pool) {
pr_err("nvm: could not create dma pool\n");
- nvm_free(dev);
+ kref_put(&dev->ref, nvm_free);
return -ENOMEM;
}
@@ -1179,6 +1206,7 @@ void nvm_unregister(struct nvm_dev *dev)
if (t->dev->parent != dev)
continue;
__nvm_remove_target(t, false);
+ kref_put(&dev->ref, nvm_free);
}
mutex_unlock(&dev->mlock);
@@ -1186,13 +1214,14 @@ void nvm_unregister(struct nvm_dev *dev)
list_del(&dev->devices);
up_write(&nvm_lock);
- nvm_free(dev);
+ kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
+ int ret;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1203,7 +1232,12 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- return nvm_create_tgt(dev, create);
+ kref_get(&dev->ref);
+ ret = nvm_create_tgt(dev, create);
+ if (ret)
+ kref_put(&dev->ref, nvm_free);
+
+ return ret;
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -1322,8 +1356,6 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
struct nvm_ioctl_remove remove;
- struct nvm_dev *dev;
- int ret = 0;
if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
return -EFAULT;
@@ -1335,13 +1367,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return -EINVAL;
}
- list_for_each_entry(dev, &nvm_devices, devices) {
- ret = nvm_remove_tgt(dev, &remove);
- if (!ret)
- break;
- }
-
- return ret;
+ return nvm_remove_tgt(&remove);
}
/* kept for compatibility reasons */
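The core.c changes above put struct nvm_dev under kref-based lifetime management: the reference is initialised at allocation, taken per created target and dropped on every error and removal path. A generic sketch of that ownership pattern, using a made-up structure name:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_dev {
	struct kref ref;
	/* ... device state ... */
};

static void example_dev_release(struct kref *ref)
{
	struct example_dev *dev = container_of(ref, struct example_dev, ref);

	kfree(dev);
}

static struct example_dev *example_dev_alloc(void)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		kref_init(&dev->ref);	/* refcount starts at 1 for the registration */
	return dev;
}

static void example_dev_add_user(struct example_dev *dev)
{
	kref_get(&dev->ref);	/* each user (e.g. a target) pins the device */
}

static void example_dev_drop_user(struct example_dev *dev)
{
	kref_put(&dev->ref, example_dev_release);	/* last put frees the device */
}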
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index c9fa26f95659..5c1034c22197 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -18,7 +18,8 @@
#include "pblk.h"
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+ unsigned long flags)
{
struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
@@ -43,6 +44,7 @@ retry:
goto retry;
case NVM_IO_ERR:
pblk_pipeline_stop(pblk);
+ bio_io_error(bio);
goto out;
}
@@ -79,7 +81,9 @@ retry:
out:
generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
- return ret;
+
+ if (ret == NVM_IO_DONE)
+ bio_endio(bio);
}
/*
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 6ca868868fee..773537804319 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -562,11 +562,9 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int ret;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io_sync(pblk, rqd);
pblk_up_chunk(pblk, ppa_list[0]);
@@ -725,6 +723,7 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = pblk_line_smeta_start(pblk, line);
int i, ret;
@@ -748,9 +747,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
@@ -761,8 +761,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
atomic_dec(&pblk->inflight_io);
- if (rqd.error)
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
pblk_log_read_err(pblk, &rqd);
+ ret = -EIO;
+ }
clear_rqd:
pblk_free_rqd_meta(pblk, &rqd);
@@ -775,6 +777,7 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
@@ -799,12 +802,13 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta = pblk_get_meta(pblk,
rqd.meta_list, i);
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
meta->lba = lba_list[paddr] = addr_empty;
}
@@ -834,8 +838,9 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
+ void *ppa_list_buf, *meta_list;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = line->emeta_ssec;
dma_addr_t dma_ppa_list, dma_meta_list;
@@ -851,7 +856,7 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
if (!meta_list)
return -ENOMEM;
- ppa_list = meta_list + pblk_dma_meta_size(pblk);
+ ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
next_rq:
@@ -872,11 +877,12 @@ next_rq:
rqd.bio = bio;
rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
+ rqd.ppa_list = ppa_list_buf;
rqd.dma_meta_list = dma_meta_list;
rqd.dma_ppa_list = dma_ppa_list;
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = rq_ppas;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
@@ -904,7 +910,7 @@ next_rq:
}
for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
}
ret = pblk_submit_io_sync(pblk, &rqd);
@@ -916,8 +922,11 @@ next_rq:
atomic_dec(&pblk->inflight_io);
- if (rqd.error)
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
pblk_log_read_err(pblk, &rqd);
+ ret = -EIO;
+ goto free_rqd_dma;
+ }
emeta_buf += rq_len;
left_ppas -= rq_ppas;
@@ -1162,7 +1171,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
off = bit * geo->ws_opt;
bitmap_set(line->map_bitmap, off, lm->smeta_sec);
line->sec_in_line -= lm->smeta_sec;
- line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
if (init && pblk_line_smeta_write(pblk, line, off)) {
@@ -1521,11 +1529,9 @@ void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int i;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
for (i = 0; i < rqd->nr_ppas; i++)
pblk_ppa_to_line_put(pblk, ppa_list[i]);
}
@@ -1699,6 +1705,14 @@ static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
+ if (line->w_err_gc->has_gc_err) {
+ spin_unlock(&line->lock);
+ pblk_err(pblk, "line %d had errors during GC\n", line->id);
+ pblk_put_line_back(pblk, line);
+ line->w_err_gc->has_gc_err = 0;
+ return;
+ }
+
line->state = PBLK_LINESTATE_FREE;
trace_pblk_line_state(pblk_disk_name(pblk), line->id,
line->state);
@@ -2023,7 +2037,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
struct ppa_addr ppa_l2p;
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return;
}
@@ -2063,7 +2077,7 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
#endif
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return 0;
}
@@ -2109,7 +2123,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
}
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return;
}
@@ -2135,8 +2149,8 @@ out:
spin_unlock(&pblk->trans_lock);
}
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
- sector_t blba, int nr_secs)
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+ sector_t blba, int nr_secs, bool *from_cache)
{
int i;
@@ -2150,10 +2164,19 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
+ if (i > 0 && *from_cache)
+ break;
+ *from_cache = false;
+
kref_get(&line->ref);
+ } else {
+ if (i > 0 && !*from_cache)
+ break;
+ *from_cache = true;
}
}
spin_unlock(&pblk->trans_lock);
+ return i;
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
@@ -2167,7 +2190,7 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
lba = lba_list[i];
if (lba != ADDR_EMPTY) {
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
continue;
}
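Several pblk-core hunks above replace the open-coded '(rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr' with nvm_rq_to_ppa_list(). Based only on the lines being removed, the helper is presumably equivalent to the sketch below, shown here to make the refactor easier to follow:

#include <linux/lightnvm.h>

/* Presumed equivalent of nvm_rq_to_ppa_list(), mirroring the removed ternary */
static inline struct ppa_addr *example_rq_to_ppa_list(struct nvm_rq *rqd)
{
	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}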
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 26a52ea7ec45..63ee205b41c4 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -59,24 +59,28 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
wake_up_process(gc->gc_writer_ts);
}
-static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
+void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct list_head *move_list;
+ spin_lock(&l_mg->gc_lock);
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_CLOSED;
trace_pblk_line_state(pblk_disk_name(pblk), line->id,
line->state);
+
+ /* We need to reset gc_group in order to ensure that
+ * pblk_line_gc_list() returns the proper move_list,
+ * since right now the current line is not on any of the
+ * gc lists.
+ */
+ line->gc_group = PBLK_LINEGC_NONE;
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
-
- if (move_list) {
- spin_lock(&l_mg->gc_lock);
- list_add_tail(&line->list, move_list);
- spin_unlock(&l_mg->gc_lock);
- }
+ list_add_tail(&line->list, move_list);
+ spin_unlock(&l_mg->gc_lock);
}
static void pblk_gc_line_ws(struct work_struct *work)
@@ -84,8 +88,6 @@ static void pblk_gc_line_ws(struct work_struct *work)
struct pblk_line_ws *gc_rq_ws = container_of(work,
struct pblk_line_ws, ws);
struct pblk *pblk = gc_rq_ws->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line *line = gc_rq_ws->line;
struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
@@ -93,18 +95,10 @@ static void pblk_gc_line_ws(struct work_struct *work)
up(&gc->gc_sem);
- gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
- if (!gc_rq->data) {
- pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
- line->id, *line->vsc, gc_rq->nr_secs);
- goto out;
- }
-
/* Read from GC victim block */
ret = pblk_submit_read_gc(pblk, gc_rq);
if (ret) {
- pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
- line->id, ret);
+ line->w_err_gc->has_gc_err = 1;
goto out;
}
@@ -189,6 +183,8 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
struct pblk_line *line = line_ws->line;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line_ws *gc_rq_ws;
struct pblk_gc_rq *gc_rq;
@@ -247,9 +243,13 @@ next_rq:
gc_rq->nr_secs = nr_secs;
gc_rq->line = line;
+ gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
+ if (!gc_rq->data)
+ goto fail_free_gc_rq;
+
gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!gc_rq_ws)
- goto fail_free_gc_rq;
+ goto fail_free_gc_data;
gc_rq_ws->pblk = pblk;
gc_rq_ws->line = line;
@@ -281,6 +281,8 @@ out:
return;
+fail_free_gc_data:
+ vfree(gc_rq->data);
fail_free_gc_rq:
kfree(gc_rq);
fail_free_lba_list:
@@ -290,8 +292,11 @@ fail_free_invalid_bitmap:
fail_free_ws:
kfree(line_ws);
+ /* The line goes back to the closed state, so we cannot release the
+ * additional reference for the line, since we only do that on the
+ * GC-to-free line state transition.
+ */
pblk_put_line_back(pblk, line);
- kref_put(&line->ref, pblk_line_put);
atomic_dec(&gc->read_inflight_gc);
pblk_err(pblk, "failed to GC line %d\n", line->id);
@@ -355,8 +360,13 @@ static int pblk_gc_read(struct pblk *pblk)
pblk_gc_kick(pblk);
- if (pblk_gc_line(pblk, line))
+ if (pblk_gc_line(pblk, line)) {
pblk_err(pblk, "failed to GC line %d\n", line->id);
+ /* rollback */
+ spin_lock(&gc->r_lock);
+ list_add_tail(&line->list, &gc->r_list);
+ spin_unlock(&gc->r_lock);
+ }
return 0;
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 8b643d0bffae..b351c7f002de 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -47,33 +47,6 @@ static struct pblk_global_caches pblk_caches = {
struct bio_set pblk_bio_set;
-static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
- struct bio *bio)
-{
- int ret;
-
- /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
- * constraint. Writes can be of arbitrary size.
- */
- if (bio_data_dir(bio) == READ) {
- blk_queue_split(q, &bio);
- ret = pblk_submit_read(pblk, bio);
- if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
- bio_put(bio);
-
- return ret;
- }
-
- /* Prevent deadlock in the case of a modest LUN configuration and large
- * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
- * available for user I/O.
- */
- if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
- blk_queue_split(q, &bio);
-
- return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
-}
-
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
struct pblk *pblk = q->queuedata;
@@ -86,13 +59,21 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
}
}
- switch (pblk_rw_io(q, pblk, bio)) {
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
+ /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
+ * constraint. Writes can be of arbitrary size.
+ */
+ if (bio_data_dir(bio) == READ) {
+ blk_queue_split(q, &bio);
+ pblk_submit_read(pblk, bio);
+ } else {
+ /* Prevent deadlock in the case of a modest LUN configuration
+ * and large user I/Os. Unless stalled, the rate limiter
+ * leaves at least 256KB available for user I/O.
+ */
+ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
+ blk_queue_split(q, &bio);
+
+ pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
return BLK_QC_T_NONE;
@@ -105,7 +86,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk)
if (pblk->addrf_len < 32)
entry_size = 4;
- return entry_size * pblk->rl.nr_secs;
+ return entry_size * pblk->capacity;
}
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -164,13 +145,18 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
int ret = 0;
map_size = pblk_trans_map_size(pblk);
- pblk->trans_map = vmalloc(map_size);
- if (!pblk->trans_map)
+ pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
+ | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ if (!pblk->trans_map) {
+ pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
+ map_size);
return -ENOMEM;
+ }
pblk_ppa_set_empty(&ppa);
- for (i = 0; i < pblk->rl.nr_secs; i++)
+ for (i = 0; i < pblk->capacity; i++)
pblk_trans_map_set(pblk, i, ppa);
ret = pblk_l2p_recover(pblk, factory_init);
@@ -701,7 +687,6 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_chks;
- pblk->rl.nr_secs = nr_free_chks * geo->clba;
/* Consider sectors used for metadata */
sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
@@ -1284,7 +1269,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
geo->all_luns, pblk->l_mg.nr_lines,
- (unsigned long long)pblk->rl.nr_secs,
+ (unsigned long long)pblk->capacity,
pblk->rwb.nr_entries);
wake_up_process(pblk->writer_ts);
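The L2P allocation above switches from plain vmalloc() to __vmalloc() with __GFP_NOWARN, __GFP_RETRY_MAYFAIL and __GFP_HIGHMEM so an oversized translation map fails gracefully instead of triggering the OOM killer. A condensed sketch of that allocation strategy, with the size left as a parameter:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_alloc_l2p(size_t map_size)
{
	/*
	 * __GFP_NOWARN + __GFP_RETRY_MAYFAIL: try hard, but return NULL rather
	 * than invoking the OOM killer; __GFP_HIGHMEM lets the table use
	 * highmem pages on 32-bit systems.
	 */
	return __vmalloc(map_size,
			 GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL |
			 __GFP_HIGHMEM,
			 PAGE_KERNEL);
}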
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 7fbc99b60cac..5408e32b2f13 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -162,6 +162,7 @@ int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
*erase_ppa = ppa_list[i];
erase_ppa->a.blk = e_line->id;
+ erase_ppa->a.reserved = 0;
spin_unlock(&e_line->lock);
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 03c241b340ea..5abb1705b039 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -642,7 +642,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter, bool advanced_bio)
+ struct ppa_addr ppa)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -673,15 +673,6 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
ret = 0;
goto out;
}
-
- /* Only advance the bio if it hasn't been advanced already. If advanced,
- * this bio is at least a partial bio (i.e., it has partially been
- * filled with data from the cache). If part of the data resides on the
- * media, we will read later on
- */
- if (unlikely(!advanced_bio))
- bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
-
data = bio_data(bio);
memcpy(data, entry->data, rb->seg_size);
@@ -799,8 +790,8 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
}
out:
- spin_unlock(&rb->w_lock);
spin_unlock_irq(&rb->s_lock);
+ spin_unlock(&rb->w_lock);
return ret;
}
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 0b7d5fb4548d..d98ea392fe33 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,8 +26,7 @@
* issued.
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
- sector_t lba, struct ppa_addr ppa,
- int bio_iter, bool advanced_bio)
+ sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -35,73 +34,75 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
- bio_iter, advanced_bio);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}
-static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
+static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct bio *bio, sector_t blba,
- unsigned long *read_bitmap)
+ bool *from_cache)
{
void *meta_list = rqd->meta_list;
- struct ppa_addr ppas[NVM_MAX_VLBA];
- int nr_secs = rqd->nr_ppas;
- bool advanced_bio = false;
- int i, j = 0;
+ int nr_secs, i;
- pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
+retry:
+ nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
+ from_cache);
+
+ if (!*from_cache)
+ goto end;
for (i = 0; i < nr_secs; i++) {
- struct ppa_addr p = ppas[i];
struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
sector_t lba = blba + i;
-retry:
- if (pblk_ppa_empty(p)) {
+ if (pblk_ppa_empty(rqd->ppa_list[i])) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
- WARN_ON(test_and_set_bit(i, read_bitmap));
meta->lba = addr_empty;
-
- if (unlikely(!advanced_bio)) {
- bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
- advanced_bio = true;
+ } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
+ /*
+ * Try to read from write buffer. The address is later
+ * checked on the write buffer to prevent retrieving
+ * overwritten data.
+ */
+ if (!pblk_read_from_cache(pblk, bio, lba,
+ rqd->ppa_list[i])) {
+ if (i == 0) {
+ /*
+ * We haven't called bio_advance()
+ * yet, so we can simply retry.
+ */
+ goto retry;
+ } else {
+ /*
+ * We have already called bio_advance(),
+ * so we cannot retry; we need to
+ * return from this function so that
+ * the caller can handle the bio
+ * splitting at the current sector
+ * position.
+ */
+ nr_secs = i;
+ goto end;
+ }
}
-
- goto next;
- }
-
- /* Try to read from write buffer. The address is later checked
- * on the write buffer to prevent retrieving overwritten data.
- */
- if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i,
- advanced_bio)) {
- pblk_lookup_l2p_seq(pblk, &p, lba, 1);
- goto retry;
- }
- WARN_ON(test_and_set_bit(i, read_bitmap));
meta->lba = cpu_to_le64(lba);
- advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
- } else {
- /* Read from media non-cached sectors */
- rqd->ppa_list[j++] = p;
}
-
-next:
- if (advanced_bio)
- bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
+ bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
+end:
if (pblk_io_aligned(pblk, nr_secs))
rqd->is_seq = 1;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
+
+ return nr_secs;
}
@@ -175,12 +176,12 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
-static void pblk_end_user_read(struct bio *bio)
+static void pblk_end_user_read(struct bio *bio, int error)
{
-#ifdef CONFIG_NVM_PBLK_DEBUG
- WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
-#endif
- bio_endio(bio);
+ if (error && error != NVM_RSP_WARN_HIGHECC)
+ bio_io_error(bio);
+ else
+ bio_endio(bio);
}
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
@@ -197,9 +198,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
pblk_log_read_err(pblk, rqd);
pblk_read_check_seq(pblk, rqd, r_ctx->lba);
-
- if (int_bio)
- bio_put(int_bio);
+ bio_put(int_bio);
if (put_line)
pblk_rq_to_line_put(pblk, rqd);
@@ -219,188 +218,17 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = (struct bio *)r_ctx->private;
- pblk_end_user_read(bio);
+ pblk_end_user_read(bio, rqd->error);
__pblk_end_io_read(pblk, rqd, true);
}
-static void pblk_end_partial_read(struct nvm_rq *rqd)
-{
- struct pblk *pblk = rqd->private;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_pr_ctx *pr_ctx = r_ctx->private;
- struct pblk_sec_meta *meta;
- struct bio *new_bio = rqd->bio;
- struct bio *bio = pr_ctx->orig_bio;
- void *meta_list = rqd->meta_list;
- unsigned long *read_bitmap = pr_ctx->bitmap;
- struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
- struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
- int nr_secs = pr_ctx->orig_nr_secs;
- int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
- void *src_p, *dst_p;
- int bit, i;
-
- if (unlikely(nr_holes == 1)) {
- struct ppa_addr ppa;
-
- ppa = rqd->ppa_addr;
- rqd->ppa_list = pr_ctx->ppa_ptr;
- rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
- rqd->ppa_list[0] = ppa;
- }
-
- for (i = 0; i < nr_secs; i++) {
- meta = pblk_get_meta(pblk, meta_list, i);
- pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
- }
-
- /* Fill the holes in the original bio */
- i = 0;
- for (bit = 0; bit < nr_secs; bit++) {
- if (!test_bit(bit, read_bitmap)) {
- struct bio_vec dst_bv, src_bv;
- struct pblk_line *line;
-
- line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
- kref_put(&line->ref, pblk_line_put);
-
- meta = pblk_get_meta(pblk, meta_list, bit);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
-
- dst_bv = bio_iter_iovec(bio, orig_iter);
- src_bv = bio_iter_iovec(new_bio, new_iter);
-
- src_p = kmap_atomic(src_bv.bv_page);
- dst_p = kmap_atomic(dst_bv.bv_page);
-
- memcpy(dst_p + dst_bv.bv_offset,
- src_p + src_bv.bv_offset,
- PBLK_EXPOSED_PAGE_SIZE);
-
- kunmap_atomic(src_p);
- kunmap_atomic(dst_p);
-
- flush_dcache_page(dst_bv.bv_page);
- mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
-
- bio_advance_iter(new_bio, &new_iter,
- PBLK_EXPOSED_PAGE_SIZE);
- i++;
- }
- bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
- }
-
- bio_put(new_bio);
- kfree(pr_ctx);
-
- /* restore original request */
- rqd->bio = NULL;
- rqd->nr_ppas = nr_secs;
-
- bio_endio(bio);
- __pblk_end_io_read(pblk, rqd, false);
-}
-
-static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap,
- int nr_holes)
-{
- void *meta_list = rqd->meta_list;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_pr_ctx *pr_ctx;
- struct bio *new_bio, *bio = r_ctx->private;
- int nr_secs = rqd->nr_ppas;
- int i;
-
- new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
- if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
- goto fail_bio_put;
-
- if (nr_holes != new_bio->bi_vcnt) {
- WARN_ONCE(1, "pblk: malformed bio\n");
- goto fail_free_pages;
- }
-
- pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
- if (!pr_ctx)
- goto fail_free_pages;
-
- for (i = 0; i < nr_secs; i++) {
- struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-
- pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
- }
-
- new_bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
- rqd->bio = new_bio;
- rqd->nr_ppas = nr_holes;
-
- pr_ctx->orig_bio = bio;
- bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
- pr_ctx->bio_init_idx = bio_init_idx;
- pr_ctx->orig_nr_secs = nr_secs;
- r_ctx->private = pr_ctx;
-
- if (unlikely(nr_holes == 1)) {
- pr_ctx->ppa_ptr = rqd->ppa_list;
- pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
- rqd->ppa_addr = rqd->ppa_list[0];
- }
- return 0;
-
-fail_free_pages:
- pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_bio_put:
- bio_put(new_bio);
-
- return -ENOMEM;
-}
-
-static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap, int nr_secs)
-{
- int nr_holes;
- int ret;
-
- nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-
- if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
- nr_holes))
- return NVM_IO_ERR;
-
- rqd->end_io = pblk_end_partial_read;
-
- ret = pblk_submit_io(pblk, rqd);
- if (ret) {
- bio_put(rqd->bio);
- pblk_err(pblk, "partial read IO submission failed\n");
- goto err;
- }
-
- return NVM_IO_OK;
-
-err:
- pblk_err(pblk, "failed to perform partial read\n");
-
- /* Free allocated pages in new bio */
- pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
- __pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_ERR;
-}
-
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
- sector_t lba, unsigned long *read_bitmap)
+ sector_t lba, bool *from_cache)
{
struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
struct ppa_addr ppa;
- pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+ pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
@@ -410,7 +238,6 @@ retry:
if (pblk_ppa_empty(ppa)) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
- WARN_ON(test_and_set_bit(0, read_bitmap));
meta->lba = addr_empty;
return;
}
@@ -419,12 +246,11 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
- pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
+ pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
goto retry;
}
- WARN_ON(test_and_set_bit(0, read_bitmap));
meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -435,95 +261,92 @@ retry:
}
}
-int pblk_submit_read(struct pblk *pblk, struct bio *bio)
+void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
+ bool from_cache;
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
- unsigned int bio_init_idx;
- DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
- int ret = NVM_IO_ERR;
+ struct bio *int_bio, *split_bio;
generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
&pblk->disk->part0);
- bitmap_zero(read_bitmap, nr_secs);
-
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd->opcode = NVM_OP_PREAD;
rqd->nr_ppas = nr_secs;
- rqd->bio = NULL; /* cloned bio if needed */
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
r_ctx->start_time = jiffies;
r_ctx->lba = blba;
- r_ctx->private = bio; /* original bio */
- /* Save the index for this bio's start. This is needed in case
- * we need to fill a partial read.
- */
- bio_init_idx = pblk_get_bi_idx(bio);
+ if (pblk_alloc_rqd_meta(pblk, rqd)) {
+ bio_io_error(bio);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
+ return;
+ }
- if (pblk_alloc_rqd_meta(pblk, rqd))
- goto fail_rqd_free;
+ /* Clone read bio to deal internally with:
+ * -read errors when reading from drive
+ * -bio_advance() calls during cache reads
+ */
+ int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
if (nr_secs > 1)
- pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
+ nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
+ &from_cache);
else
- pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
+ pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
- if (bitmap_full(read_bitmap, nr_secs)) {
+split_retry:
+ r_ctx->private = bio; /* original bio */
+ rqd->bio = int_bio; /* internal bio */
+
+ if (from_cache && nr_secs == rqd->nr_ppas) {
+ /* All data was read from cache, we can complete the IO. */
+ pblk_end_user_read(bio, 0);
atomic_inc(&pblk->inflight_io);
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_DONE;
- }
-
- /* All sectors are to be read from the device */
- if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
- struct bio *int_bio = NULL;
+ } else if (nr_secs != rqd->nr_ppas) {
+ /* The read bio request could be partially filled by the write
+ * buffer, but there are some holes that need to be read from
+	 * the drive. In order to handle this, we use the block layer
+	 * mechanism to split this request into smaller ones and chain
+	 * them together.
+ */
+ split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
+ &pblk_bio_set);
+ bio_chain(split_bio, bio);
+ generic_make_request(bio);
+
+	/* The new bio contains the first N sectors of the previous one,
+	 * so we can keep using the existing rqd, but we need to shrink
+	 * the number of PPAs in it. The new bio is also guaranteed to
+	 * contain data only from the cache or only from the drive,
+	 * never a mix of the two.
+ */
+ bio = split_bio;
+ rqd->nr_ppas = nr_secs;
+ if (rqd->nr_ppas == 1)
+ rqd->ppa_addr = rqd->ppa_list[0];
- /* Clone read bio to deal with read errors internally */
+	/* Recreate int_bio - the existing one may already have had some
+	 * of its internal fields modified.
+ */
+ bio_put(int_bio);
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
- if (!int_bio) {
- pblk_err(pblk, "could not clone read bio\n");
- goto fail_end_io;
- }
-
- rqd->bio = int_bio;
-
- if (pblk_submit_io(pblk, rqd)) {
- pblk_err(pblk, "read IO submission failed\n");
- ret = NVM_IO_ERR;
- goto fail_end_io;
- }
-
- return NVM_IO_OK;
+ goto split_retry;
+ } else if (pblk_submit_io(pblk, rqd)) {
+ /* Submitting IO to drive failed, let's report an error */
+ rqd->error = -ENODEV;
+ pblk_end_io_read(rqd);
}
-
- /* The read bio request could be partially filled by the write buffer,
- * but there are some holes that need to be read from the drive.
- */
- ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
- nr_secs);
- if (ret)
- goto fail_meta_free;
-
- return NVM_IO_OK;
-
-fail_meta_free:
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-fail_rqd_free:
- pblk_free_rqd(pblk, rqd, PBLK_READ);
- return ret;
-fail_end_io:
- __pblk_end_io_read(pblk, rqd, false);
- return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
@@ -568,7 +391,7 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
goto out;
/* logic error: lba out-of-bounds */
- if (lba >= pblk->rl.nr_secs) {
+ if (lba >= pblk->capacity) {
WARN(1, "pblk: read lba out of bounds\n");
goto out;
}
@@ -642,7 +465,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (pblk_submit_io_sync(pblk, &rqd)) {
ret = -EIO;
- pblk_err(pblk, "GC read request failed\n");
goto err_free_bio;
}
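
The split_retry logic above leans on the generic block-layer split-and-chain pattern: carve off the sectors that can be served now, chain the split bio to the original so the original completes only once both parts finish, and resubmit the remainder. A minimal stand-alone sketch of that pattern (not code from this patch; handle_first_part() is a hypothetical placeholder for serving the split-off sectors, e.g. from the write buffer):

#include <linux/bio.h>

void handle_first_part(struct bio *bio);	/* hypothetical helper */

/*
 * Sketch: carve the first 'sectors' sectors off 'bio', chain the remainder
 * back to it so the original completes only when both halves are done, and
 * resubmit the remainder through the block layer.
 */
static void split_and_chain(struct bio *bio, unsigned int sectors,
			    struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_KERNEL, bs);

	bio_chain(split, bio);		/* 'bio' completes only after 'split' */
	generic_make_request(bio);	/* resubmit the remaining sectors */

	handle_first_part(split);	/* hypothetical: serve the first part */
}
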
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index d86f580036d3..e6dda04de144 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -93,10 +93,24 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
u64 written_secs)
{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
int i;
for (i = 0; i < written_secs; i += pblk->min_write_pgs)
- pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+ __pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+
+ spin_lock(&l_mg->free_lock);
+ if (written_secs > line->left_msecs) {
+ /*
+ * We have all data sectors written
+ * and some emeta sectors written too.
+ */
+ line->left_msecs = 0;
+ } else {
+ /* We have only some data sectors written. */
+ line->left_msecs -= written_secs;
+ }
+ spin_unlock(&l_mg->free_lock);
}
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
@@ -165,6 +179,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
+ struct ppa_addr *ppa_list;
void *data;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
@@ -194,7 +209,7 @@ next_pad_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
if (rq_ppas < pblk->min_write_pgs) {
pblk_err(pblk, "corrupted pad line %d\n", line->id);
- goto fail_free_pad;
+ goto fail_complete;
}
rq_len = rq_ppas * geo->csecs;
@@ -203,7 +218,7 @@ next_pad_rq:
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_pad;
+ goto fail_complete;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -212,8 +227,11 @@ next_pad_rq:
rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
ret = pblk_alloc_rqd_meta(pblk, rqd);
- if (ret)
- goto fail_free_rqd;
+ if (ret) {
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
+ bio_put(bio);
+ goto fail_complete;
+ }
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
@@ -222,6 +240,7 @@ next_pad_rq:
rqd->end_io = pblk_end_io_recov;
rqd->private = pad_rq;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
meta_list = rqd->meta_list;
for (i = 0; i < rqd->nr_ppas; ) {
@@ -249,18 +268,21 @@ next_pad_rq:
lba_list[w_ptr] = addr_empty;
meta = pblk_get_meta(pblk, meta_list, i);
meta->lba = addr_empty;
- rqd->ppa_list[i] = dev_ppa;
+ ppa_list[i] = dev_ppa;
}
}
kref_get(&pad_rq->ref);
- pblk_down_chunk(pblk, rqd->ppa_list[0]);
+ pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- pblk_up_chunk(pblk, rqd->ppa_list[0]);
- goto fail_free_rqd;
+ pblk_up_chunk(pblk, ppa_list[0]);
+ kref_put(&pad_rq->ref, pblk_recov_complete);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
+ bio_put(bio);
+ goto fail_complete;
}
left_line_ppas -= rq_ppas;
@@ -268,13 +290,9 @@ next_pad_rq:
if (left_ppas && left_line_ppas)
goto next_pad_rq;
+fail_complete:
kref_put(&pad_rq->ref, pblk_recov_complete);
-
- if (!wait_for_completion_io_timeout(&pad_rq->wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pblk_err(pblk, "pad write timed out\n");
- ret = -ETIME;
- }
+ wait_for_completion(&pad_rq->wait);
if (!pblk_line_is_full(line))
pblk_err(pblk, "corrupted padded line: %d\n", line->id);
@@ -283,14 +301,6 @@ next_pad_rq:
free_rq:
kfree(pad_rq);
return ret;
-
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
- bio_put(bio);
-fail_free_pad:
- kfree(pad_rq);
- vfree(data);
- return ret;
}
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
@@ -412,6 +422,7 @@ retry_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
if (pblk_io_aligned(pblk, rq_ppas))
rqd->is_seq = 1;
@@ -430,7 +441,7 @@ retry_rq:
}
for (j = 0; j < pblk->min_write_pgs; j++, i++)
- rqd->ppa_list[i] =
+ ppa_list[i] =
addr_to_gen_ppa(pblk, paddr + j, line->id);
}
@@ -444,7 +455,7 @@ retry_rq:
atomic_dec(&pblk->inflight_io);
/* If a read fails, do a best effort by padding the line and retrying */
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
int pad_distance, ret;
if (padded) {
@@ -474,11 +485,11 @@ retry_rq:
lba_list[paddr++] = cpu_to_le64(lba);
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
+ if (lba == ADDR_EMPTY || lba >= pblk->capacity)
continue;
line->nr_valid_lbas++;
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
+ pblk_update_map(pblk, lba, ppa_list[i]);
}
left_ppas -= rq_ppas;
@@ -647,10 +658,12 @@ static int pblk_line_was_written(struct pblk_line *line,
bppa = pblk->luns[smeta_blk].bppa;
chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
- if (chunk->state & NVM_CHK_ST_FREE)
- return 0;
+ if (chunk->state & NVM_CHK_ST_CLOSED ||
+ (chunk->state & NVM_CHK_ST_OPEN
+ && chunk->wp >= lm->smeta_sec))
+ return 1;
- return 1;
+ return 0;
}
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
@@ -844,6 +857,7 @@ next:
spin_unlock(&l_mg->free_lock);
} else {
spin_lock(&l_mg->free_lock);
+ l_mg->data_line = data_line;
/* Allocate next line for preparation */
l_mg->data_next = pblk_line_get(pblk);
if (l_mg->data_next) {
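
The reworked padding path above coordinates in-flight pad writes with a kref plus a completion: the submitter holds one reference, each submitted request holds another that its completion (or error) path drops, and the final kref_put wakes the waiter. A generic sketch of that pattern under assumed names (pad_ctx, pad_end_io and pad_submit_all are illustrative, not part of the patch):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>

struct pad_ctx {
	struct kref ref;
	struct completion done;
};

static void pad_ctx_release(struct kref *ref)
{
	struct pad_ctx *ctx = container_of(ref, struct pad_ctx, ref);

	complete(&ctx->done);			/* last reference gone: wake waiter */
}

static void pad_end_io(struct pad_ctx *ctx)
{
	kref_put(&ctx->ref, pad_ctx_release);	/* called once per finished request */
}

static void pad_submit_all(struct pad_ctx *ctx, int nr)
{
	int i;

	kref_init(&ctx->ref);			/* submitter's own reference */
	init_completion(&ctx->done);

	for (i = 0; i < nr; i++) {
		kref_get(&ctx->ref);		/* one reference per request */
		/* submit request i; its end_io path calls pad_end_io(ctx) */
	}

	kref_put(&ctx->ref, pad_ctx_release);	/* drop the initial reference */
	wait_for_completion(&ctx->done);	/* returns once all requests end */
}
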
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6593deab52da..4e63f9b5954c 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -228,6 +228,7 @@ static void pblk_submit_rec(struct work_struct *work)
mempool_free(recovery, &pblk->rec_pool);
atomic_dec(&pblk->inflight_io);
+ pblk_write_kick(pblk);
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index ac3ab778e976..a67855387f53 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -43,8 +43,6 @@
#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
-#define PBLK_COMMAND_TIMEOUT_MS 30000
-
/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)
@@ -123,18 +121,6 @@ struct pblk_g_ctx {
u64 lba;
};
-/* partial read context */
-struct pblk_pr_ctx {
- struct bio *orig_bio;
- DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
- unsigned int orig_nr_secs;
- unsigned int bio_init_idx;
- void *ppa_ptr;
- dma_addr_t dma_ppa_list;
- u64 lba_list_mem[NVM_MAX_VLBA];
- u64 lba_list_media[NVM_MAX_VLBA];
-};
-
/* Pad context */
struct pblk_pad_rq {
struct pblk *pblk;
@@ -305,7 +291,6 @@ struct pblk_rl {
struct timer_list u_timer;
- unsigned long long nr_secs;
unsigned long total_blocks;
atomic_t free_blocks; /* Total number of free blocks (+ OP) */
@@ -440,6 +425,7 @@ struct pblk_smeta {
struct pblk_w_err_gc {
int has_write_err;
+ int has_gc_err;
__le64 *lba_list;
};
@@ -465,7 +451,6 @@ struct pblk_line {
int meta_line; /* Metadata line id */
int meta_distance; /* Distance between data and metadata */
- u64 smeta_ssec; /* Sector where smeta starts */
u64 emeta_ssec; /* Sector where emeta starts */
unsigned int sec_in_line; /* Number of usable secs in line */
@@ -762,7 +747,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
unsigned int pos, unsigned int nr_entries,
unsigned int count);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter, bool advanced_bio);
+ struct ppa_addr ppa);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
@@ -862,15 +847,15 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs);
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
- sector_t blba, int nr_secs);
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+ sector_t blba, int nr_secs, bool *from_cache);
void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
/*
* pblk user I/O write path
*/
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
@@ -896,7 +881,7 @@ void pblk_write_kick(struct pblk *pblk);
* pblk read path
*/
extern struct bio_set pblk_bio_set;
-int pblk_submit_read(struct pblk *pblk, struct bio *bio);
+void pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk recovery
@@ -921,6 +906,7 @@ void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);
+void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
/*
* pblk rate limiter
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index d86e7a4ac04d..595542bfae85 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -41,6 +41,16 @@ config PL320_MBOX
Management Engine, primarily for cpufreq. Say Y here if you want
to use the PL320 IPCM support.
+config ARMADA_37XX_RWTM_MBOX
+ tristate "Armada 37xx rWTM BIU Mailbox"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ help
+	  Mailbox implementation for communication with the firmware
+	  running on the Cortex-M3 rWTM secure processor of the Armada 37xx
+	  SoC. Say Y here if you are building for such a device (for example
+ the Turris Mox router).
+
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 8be3bcbcf882..c22fad6f696b 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
+obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
+
obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
new file mode 100644
index 000000000000..97f90e97a83c
--- /dev/null
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/armada-37xx-rwtm-mailbox.h>
+
+#define DRIVER_NAME "armada-37xx-rwtm-mailbox"
+
+/* relative to rWTM BIU Mailbox Registers */
+#define RWTM_MBOX_PARAM(i) (0x0 + ((i) << 2))
+#define RWTM_MBOX_COMMAND 0x40
+#define RWTM_MBOX_RETURN_STATUS 0x80
+#define RWTM_MBOX_STATUS(i) (0x84 + ((i) << 2))
+#define RWTM_MBOX_FIFO_STATUS 0xc4
+#define FIFO_STS_RDY 0x100
+#define FIFO_STS_CNTR_MASK 0x7
+#define FIFO_STS_CNTR_MAX 4
+
+#define RWTM_HOST_INT_RESET 0xc8
+#define RWTM_HOST_INT_MASK 0xcc
+#define SP_CMD_COMPLETE BIT(0)
+#define SP_CMD_QUEUE_FULL_ACCESS BIT(17)
+#define SP_CMD_QUEUE_FULL BIT(18)
+
+struct a37xx_mbox {
+ struct device *dev;
+ struct mbox_controller controller;
+ void __iomem *base;
+ int irq;
+};
+
+static void a37xx_mbox_receive(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_rx_msg rx_msg;
+ int i;
+
+ rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS);
+ for (i = 0; i < 16; ++i)
+ rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i));
+
+ mbox_chan_received_data(chan, &rx_msg);
+}
+
+static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+
+ reg = readl(mbox->base + RWTM_HOST_INT_RESET);
+
+ if (reg & SP_CMD_COMPLETE)
+ a37xx_mbox_receive(chan);
+
+ if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL))
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+
+ writel(reg, mbox->base + RWTM_HOST_INT_RESET);
+ if (reg)
+ mbox_chan_txdone(chan, 0);
+
+ return reg ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_tx_msg *msg = data;
+ int i;
+ u32 reg;
+
+ if (!data)
+ return -EINVAL;
+
+ reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
+ if (!(reg & FIFO_STS_RDY))
+ dev_warn(mbox->dev, "Secure processor not ready\n");
+
+ if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 16; ++i)
+ writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
+ writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);
+
+ return 0;
+}
+
+static int a37xx_mbox_startup(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+ int ret;
+
+ ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
+ DRIVER_NAME, chan);
+ if (ret < 0) {
+ dev_err(mbox->dev, "Cannot request irq\n");
+ return ret;
+ }
+
+ /* enable IRQ generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ return 0;
+}
+
+static void a37xx_mbox_shutdown(struct mbox_chan *chan)
+{
+ u32 reg;
+ struct a37xx_mbox *mbox = chan->con_priv;
+
+ /* disable interrupt generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops a37xx_mbox_ops = {
+ .send_data = a37xx_mbox_send_data,
+ .startup = a37xx_mbox_startup,
+ .shutdown = a37xx_mbox_shutdown,
+};
+
+static int armada_37xx_mbox_probe(struct platform_device *pdev)
+{
+ struct a37xx_mbox *mbox;
+ struct resource *regs;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+	/* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mbox->base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mbox->base)) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0) {
+ dev_err(&pdev->dev, "Cannot get irq\n");
+ return mbox->irq;
+ }
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &a37xx_mbox_ops;
+ mbox->controller.txdone_irq = true;
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register mailbox controller\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ return ret;
+}
+
+static int armada_37xx_mbox_remove(struct platform_device *pdev)
+{
+ struct a37xx_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (!mbox)
+ return -EINVAL;
+
+ mbox_controller_unregister(&mbox->controller);
+
+ return 0;
+}
+
+static const struct of_device_id armada_37xx_mbox_match[] = {
+ { .compatible = "marvell,armada-3700-rwtm-mailbox" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match);
+
+static struct platform_driver armada_37xx_mbox_driver = {
+ .probe = armada_37xx_mbox_probe,
+ .remove = armada_37xx_mbox_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = armada_37xx_mbox_match,
+ },
+};
+
+module_platform_driver(armada_37xx_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
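
Consumers reach this controller through the generic mailbox client API. A minimal sketch of such a client (assumed code, not part of the patch; it reuses the tx/rx message layout visible in the driver above, and the command ID is firmware specific):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/slab.h>
#include <linux/armada-37xx-rwtm-mailbox.h>

static void rwtm_rx_callback(struct mbox_client *cl, void *data)
{
	struct armada_37xx_rwtm_rx_msg *msg = data;

	dev_info(cl->dev, "rWTM reply, retval=0x%x\n", msg->retval);
}

static int rwtm_send_example(struct device *dev, struct mbox_chan **chan)
{
	struct mbox_client *cl;
	struct armada_37xx_rwtm_tx_msg tx = {};
	int ret;

	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = dev;
	cl->rx_callback = rwtm_rx_callback;

	*chan = mbox_request_channel(cl, 0);	/* the single channel */
	if (IS_ERR(*chan))
		return PTR_ERR(*chan);

	tx.command = 1;				/* hypothetical command ID */
	ret = mbox_send_message(*chan, &tx);

	/* keep the channel open so rwtm_rx_callback() can run on the reply */
	return ret < 0 ? ret : 0;
}
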
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 85fc5b56f99b..25be8bb5e371 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -264,7 +264,6 @@ static int imx_mu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *iomem;
struct imx_mu_priv *priv;
unsigned int i;
int ret;
@@ -275,8 +274,7 @@ static int imx_mu_probe(struct platform_device *pdev)
priv->dev = dev;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, iomem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 22811784dc7d..00d5219094e5 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 210fe504f5ae..f91dfb1327c7 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -8,9 +8,9 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
@@ -240,9 +240,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
/* irq */
for (i = 0; i < IPCC_IRQ_NUM; i++) {
- ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
+ ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
if (ipcc->irqs[i] < 0) {
- dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
+ if (ipcc->irqs[i] != -EPROBE_DEFER)
+ dev_err(dev, "no IRQ specified %s\n",
+ irq_name[i]);
ret = ipcc->irqs[i];
goto err_clk;
}
@@ -263,9 +265,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
/* wakeup */
if (of_property_read_bool(np, "wakeup-source")) {
- ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
+ ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
if (ipcc->wkp < 0) {
- dev_err(dev, "could not get wakeup IRQ\n");
+ if (ipcc->wkp != -EPROBE_DEFER)
+ dev_err(dev, "could not get wakeup IRQ\n");
ret = ipcc->wkp;
goto err_clk;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2557f198e175..db269a348b20 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -436,6 +436,15 @@ config DM_DELAY
If unsure, say N.
+config DM_DUST
+ tristate "Bad sector simulation target"
+ depends on BLK_DEV_DM
+ ---help---
+ A target that simulates bad sector behavior.
+ Useful for testing.
+
+ If unsure, say N.
+
config DM_INIT
bool "DM \"dm-mod.create=\" parameter support"
depends on BLK_DEV_DM=y
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index a52b703e588e..be7a6eb92abc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
+obj-$(CONFIG_DM_DUST) += dm-dust.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6fc93834da44..151aa95775be 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
if (r)
return r;
- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ for (b = 0; ; b++) {
r = fn(context, cmd->discard_block_size, to_dblock(b),
dm_bitset_cursor_get_value(&c));
if (r)
break;
+
+ if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
+ break;
+
+ r = dm_bitset_cursor_next(&c);
+ if (r)
+ break;
}
dm_bitset_cursor_end(&c);
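
The change above restructures the __load_discards() loop so the bitset cursor is advanced between blocks without being stepped past the final bit. The corrected iteration, as a stand-alone sketch (the callback signature is simplified and the function name is illustrative):

#include "persistent-data/dm-bitset.h"

static int walk_discard_bitset(struct dm_bitset_cursor *c, uint32_t nr_blocks,
			       int (*fn)(uint32_t block, bool value))
{
	uint32_t b;
	int r = 0;

	for (b = 0; ; b++) {
		r = fn(b, dm_bitset_cursor_get_value(c));
		if (r)
			break;

		if (b + 1 == nr_blocks)		/* last block already visited */
			break;

		r = dm_bitset_cursor_next(c);	/* safe: more blocks remain */
		if (r)
			break;
	}

	return r;
}
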
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7f6462f74ac8..1b16d34bb785 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -946,6 +946,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+ struct mapped_device *md = dm_table_get_md(ti->table);
/* From now we require underlying device with our integrity profile */
if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
@@ -965,7 +966,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
+ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
cc->integrity_tag_size, cc->integrity_iv_size);
if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
@@ -973,7 +974,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL;
}
} else if (cc->integrity_iv_size)
- DMINFO("Additional per-sector space %u bytes for IV.",
+ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
cc->integrity_iv_size);
if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
@@ -1031,11 +1032,11 @@ static u8 *org_iv_of_dmreq(struct crypt_config *cc,
return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}
-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
- return (uint64_t*) ptr;
+ return (__le64 *) ptr;
}
static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
@@ -1071,7 +1072,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv, *tag;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
@@ -1143,9 +1144,11 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
r = crypto_aead_decrypt(req);
}
- if (r == -EBADMSG)
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ if (r == -EBADMSG) {
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*sector));
+ }
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
@@ -1166,7 +1169,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
struct scatterlist *sg_in, *sg_out;
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
/* Reject unexpected unaligned bio. */
@@ -1788,7 +1791,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
@@ -1887,7 +1891,7 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
* algorithm implementation is used. Help people debug performance
* problems by logging the ->cra_driver_name.
*/
- DMINFO("%s using implementation \"%s\"", ciphermode,
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
return 0;
}
@@ -1907,7 +1911,7 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
return err;
}
- DMINFO("%s using implementation \"%s\"", ciphermode,
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
return 0;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index fddffe251bf6..f496213f8b67 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- destroy_workqueue(dc->kdelayd_wq);
+ if (dc->kdelayd_wq)
+ destroy_workqueue(dc->kdelayd_wq);
if (dc->read.dev)
dm_put_device(ti, dc->read.dev);
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
new file mode 100644
index 000000000000..845f376a72d9
--- /dev/null
+++ b/drivers/md/dm-dust.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This is a test "dust" device, which fails reads on specified
+ * sectors, emulating the behavior of a hard disk drive sending
+ * a "Read Medium Error" sense.
+ *
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+
+#define DM_MSG_PREFIX "dust"
+
+struct badblock {
+ struct rb_node node;
+ sector_t bb;
+};
+
+struct dust_device {
+ struct dm_dev *dev;
+ struct rb_root badblocklist;
+ unsigned long long badblock_count;
+ spinlock_t dust_lock;
+ unsigned int blksz;
+ unsigned int sect_per_block;
+ sector_t start;
+ bool fail_read_on_bb:1;
+ bool quiet_mode:1;
+};
+
+static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct badblock *bblk = rb_entry(node, struct badblock, node);
+
+ if (bblk->bb > blk)
+ node = node->rb_left;
+ else if (bblk->bb < blk)
+ node = node->rb_right;
+ else
+ return bblk;
+ }
+
+ return NULL;
+}
+
+static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
+{
+ struct badblock *bblk;
+ struct rb_node **link = &root->rb_node, *parent = NULL;
+ sector_t value = new->bb;
+
+ while (*link) {
+ parent = *link;
+ bblk = rb_entry(parent, struct badblock, node);
+
+ if (bblk->bb > value)
+ link = &(*link)->rb_left;
+ else if (bblk->bb < value)
+ link = &(*link)->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, root);
+
+ return true;
+}
+
+static int dust_remove_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+
+ if (bblock == NULL) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu not found in badblocklist",
+ __func__, block);
+ }
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ return -EINVAL;
+ }
+
+ rb_erase(&bblock->node, &dd->badblocklist);
+ dd->badblock_count--;
+ if (!dd->quiet_mode)
+ DMINFO("%s: badblock removed at block %llu", __func__, block);
+ kfree(bblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int dust_add_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
+ if (bblock == NULL) {
+ if (!dd->quiet_mode)
+ DMERR("%s: badblock allocation failed", __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock->bb = block * dd->sect_per_block;
+ if (!dust_rb_insert(&dd->badblocklist, bblock)) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu already in badblocklist",
+ __func__, block);
+ }
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ kfree(bblock);
+ return -EINVAL;
+ }
+
+ dd->badblock_count++;
+ if (!dd->quiet_mode)
+ DMINFO("%s: badblock added at block %llu", __func__, block);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int dust_query_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ if (bblock != NULL)
+ DMINFO("%s: block %llu found in badblocklist", __func__, block);
+ else
+ DMINFO("%s: block %llu not found in badblocklist", __func__, block);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
+{
+ struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+
+ if (bblk)
+ return DM_MAPIO_KILL;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int dust_map_read(struct dust_device *dd, sector_t thisblock,
+ bool fail_read_on_bb)
+{
+ unsigned long flags;
+ int ret = DM_MAPIO_REMAPPED;
+
+ if (fail_read_on_bb) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ ret = __dust_map_read(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ }
+
+ return ret;
+}
+
+static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
+{
+ struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+
+ if (bblk) {
+ rb_erase(&bblk->node, &dd->badblocklist);
+ dd->badblock_count--;
+ kfree(bblk);
+ if (!dd->quiet_mode) {
+ sector_div(thisblock, dd->sect_per_block);
+ DMINFO("block %llu removed from badblocklist by write",
+ (unsigned long long)thisblock);
+ }
+ }
+}
+
+static int dust_map_write(struct dust_device *dd, sector_t thisblock,
+ bool fail_read_on_bb)
+{
+ unsigned long flags;
+
+ if (fail_read_on_bb) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ __dust_map_write(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int dust_map(struct dm_target *ti, struct bio *bio)
+{
+ struct dust_device *dd = ti->private;
+ int ret;
+
+ bio_set_dev(bio, dd->dev->bdev);
+ bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+ if (bio_data_dir(bio) == READ)
+ ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ else
+ ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+
+ return ret;
+}
+
+static bool __dust_clear_badblocks(struct rb_root *tree,
+ unsigned long long count)
+{
+ struct rb_node *node = NULL, *nnode = NULL;
+
+ nnode = rb_first(tree);
+ if (nnode == NULL) {
+ BUG_ON(count != 0);
+ return false;
+ }
+
+ while (nnode) {
+ node = nnode;
+ nnode = rb_next(node);
+ rb_erase(node, tree);
+ count--;
+ kfree(node);
+ }
+ BUG_ON(count != 0);
+ BUG_ON(tree->rb_node != NULL);
+
+ return true;
+}
+
+static int dust_clear_badblocks(struct dust_device *dd)
+{
+ unsigned long flags;
+ struct rb_root badblocklist;
+ unsigned long long badblock_count;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ badblocklist = dd->badblocklist;
+ badblock_count = dd->badblock_count;
+ dd->badblocklist = RB_ROOT;
+ dd->badblock_count = 0;
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ if (!__dust_clear_badblocks(&badblocklist, badblock_count))
+ DMINFO("%s: no badblocks found", __func__);
+ else
+ DMINFO("%s: badblocks cleared", __func__);
+
+ return 0;
+}
+
+/*
+ * Target parameters:
+ *
+ * <device_path> <offset> <blksz>
+ *
+ * device_path: path to the block device
+ * offset: offset to data area from start of device_path
+ * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
+ */
+static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct dust_device *dd;
+ unsigned long long tmp;
+ char dummy;
+ unsigned int blksz;
+ unsigned int sect_per_block;
+ sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
+ sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);
+
+ if (argc != 3) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
+ ti->error = "Invalid block size parameter";
+ return -EINVAL;
+ }
+
+ if (blksz < 512) {
+ ti->error = "Block size must be at least 512";
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(blksz)) {
+ ti->error = "Block size must be a power of 2";
+ return -EINVAL;
+ }
+
+ if (to_sector(blksz) > max_block_sectors) {
+ ti->error = "Block size is too large";
+ return -EINVAL;
+ }
+
+ sect_per_block = (blksz >> SECTOR_SHIFT);
+
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
+ ti->error = "Invalid device offset sector";
+ return -EINVAL;
+ }
+
+ dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
+ if (dd == NULL) {
+ ti->error = "Cannot allocate context";
+ return -ENOMEM;
+ }
+
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
+ ti->error = "Device lookup failed";
+ kfree(dd);
+ return -EINVAL;
+ }
+
+ dd->sect_per_block = sect_per_block;
+ dd->blksz = blksz;
+ dd->start = tmp;
+
+ /*
+ * Whether to fail a read on a "bad" block.
+ * Defaults to false; enabled later by message.
+ */
+ dd->fail_read_on_bb = false;
+
+ /*
+ * Initialize bad block list rbtree.
+ */
+ dd->badblocklist = RB_ROOT;
+ dd->badblock_count = 0;
+ spin_lock_init(&dd->dust_lock);
+
+ dd->quiet_mode = false;
+
+ BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);
+
+ ti->num_discard_bios = 1;
+ ti->num_flush_bios = 1;
+ ti->private = dd;
+
+ return 0;
+}
+
+static void dust_dtr(struct dm_target *ti)
+{
+ struct dust_device *dd = ti->private;
+
+ __dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
+ dm_put_device(ti, dd->dev);
+ kfree(dd);
+}
+
+static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result_buf, unsigned int maxlen)
+{
+ struct dust_device *dd = ti->private;
+ sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ bool invalid_msg = false;
+ int result = -EINVAL;
+ unsigned long long tmp, block;
+ unsigned long flags;
+ char dummy;
+
+ if (argc == 1) {
+ if (!strcasecmp(argv[0], "addbadblock") ||
+ !strcasecmp(argv[0], "removebadblock") ||
+ !strcasecmp(argv[0], "queryblock")) {
+ DMERR("%s requires an additional argument", argv[0]);
+ } else if (!strcasecmp(argv[0], "disable")) {
+ DMINFO("disabling read failures on bad sectors");
+ dd->fail_read_on_bb = false;
+ result = 0;
+ } else if (!strcasecmp(argv[0], "enable")) {
+ DMINFO("enabling read failures on bad sectors");
+ dd->fail_read_on_bb = true;
+ result = 0;
+ } else if (!strcasecmp(argv[0], "countbadblocks")) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ DMINFO("countbadblocks: %llu badblock(s) found",
+ dd->badblock_count);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ result = 0;
+ } else if (!strcasecmp(argv[0], "clearbadblocks")) {
+ result = dust_clear_badblocks(dd);
+ } else if (!strcasecmp(argv[0], "quiet")) {
+ if (!dd->quiet_mode)
+ dd->quiet_mode = true;
+ else
+ dd->quiet_mode = false;
+ result = 0;
+ } else {
+ invalid_msg = true;
+ }
+ } else if (argc == 2) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
+ return result;
+
+ block = tmp;
+ sector_div(size, dd->sect_per_block);
+ if (block > size) {
+ DMERR("selected block value out of range");
+ return result;
+ }
+
+ if (!strcasecmp(argv[0], "addbadblock"))
+ result = dust_add_block(dd, block);
+ else if (!strcasecmp(argv[0], "removebadblock"))
+ result = dust_remove_block(dd, block);
+ else if (!strcasecmp(argv[0], "queryblock"))
+ result = dust_query_block(dd, block);
+ else
+ invalid_msg = true;
+
+ } else
+ DMERR("invalid number of arguments '%d'", argc);
+
+ if (invalid_msg)
+ DMERR("unrecognized message '%s' received", argv[0]);
+
+ return result;
+}
+
+static void dust_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct dust_device *dd = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%s %s %s", dd->dev->name,
+ dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
+ dd->quiet_mode ? "quiet" : "verbose");
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %llu %u", dd->dev->name,
+ (unsigned long long)dd->start, dd->blksz);
+ break;
+ }
+}
+
+static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+ struct dust_device *dd = ti->private;
+ struct dm_dev *dev = dd->dev;
+
+ *bdev = dev->bdev;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (dd->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+
+ return 0;
+}
+
+static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct dust_device *dd = ti->private;
+
+ return fn(ti, dd->dev, dd->start, ti->len, data);
+}
+
+static struct target_type dust_target = {
+ .name = "dust",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = dust_ctr,
+ .dtr = dust_dtr,
+ .iterate_devices = dust_iterate_devices,
+ .map = dust_map,
+ .message = dust_message,
+ .status = dust_status,
+ .prepare_ioctl = dust_prepare_ioctl,
+};
+
+static int __init dm_dust_init(void)
+{
+ int result = dm_register_target(&dust_target);
+
+ if (result < 0)
+ DMERR("dm_register_target failed %d", result);
+
+ return result;
+}
+
+static void __exit dm_dust_exit(void)
+{
+ dm_unregister_target(&dust_target);
+}
+
+module_init(dm_dust_init);
+module_exit(dm_dust_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dust test target");
+MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
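
A typical test session against this target, based on the constructor arguments and message handlers above (the device path, size and block numbers are placeholders):

# create a dust device over /dev/sdX with offset 0 and a 512-byte block size
dmsetup create dust1 --table "0 $(blockdev --getsz /dev/sdX) dust /dev/sdX 0 512"

# record block 60 as bad, then start failing reads that touch it
dmsetup message dust1 0 addbadblock 60
dmsetup message dust1 0 enable

# inspect, clear and tear down
dmsetup message dust1 0 countbadblocks
dmsetup message dust1 0 clearbadblocks
dmsetup remove dust1
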
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 721efc493942..3f4139ac1f60 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -11,6 +11,7 @@
#define _LINUX_DM_EXCEPTION_STORE
#include <linux/blkdev.h>
+#include <linux/list_bl.h>
#include <linux/device-mapper.h>
/*
@@ -27,7 +28,7 @@ typedef sector_t chunk_t;
* chunk within the device.
*/
struct dm_exception {
- struct list_head hash_list;
+ struct hlist_bl_node hash_list;
chunk_t old_chunk;
chunk_t new_chunk;
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 4b76f84424c3..352e803f566e 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -160,7 +160,7 @@ static int __init dm_parse_table(struct dm_device *dev, char *str)
while (table_entry) {
DMDEBUG("parsing table \"%s\"", str);
- if (++dev->dmi.target_count >= DM_MAX_TARGETS) {
+ if (++dev->dmi.target_count > DM_MAX_TARGETS) {
DMERR("too many targets %u > %d",
dev->dmi.target_count, DM_MAX_TARGETS);
return -EINVAL;
@@ -242,9 +242,9 @@ static int __init dm_parse_devices(struct list_head *devices, char *str)
return -ENOMEM;
list_add_tail(&dev->list, devices);
- if (++ndev >= DM_MAX_DEVICES) {
- DMERR("too many targets %u > %d",
- dev->dmi.target_count, DM_MAX_TARGETS);
+ if (++ndev > DM_MAX_DEVICES) {
+ DMERR("too many devices %lu > %d",
+ ndev, DM_MAX_DEVICES);
return -EINVAL;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c27c32cf4a30..44e76cda087a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
+#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
@@ -24,6 +25,7 @@
#define DEFAULT_INTERLEAVE_SECTORS 32768
#define DEFAULT_JOURNAL_SIZE_FACTOR 7
+#define DEFAULT_SECTORS_PER_BITMAP_BIT 32768
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
@@ -33,6 +35,8 @@
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_WRITE_SUPER 16
+#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
+#define BITMAP_FLUSH_INTERVAL (10 * HZ)
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -48,6 +52,7 @@
#define SB_MAGIC "integrt"
#define SB_VERSION_1 1
#define SB_VERSION_2 2
+#define SB_VERSION_3 3
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -60,12 +65,14 @@ struct superblock {
__u64 provided_data_sectors; /* userspace uses this value */
__u32 flags;
__u8 log2_sectors_per_block;
- __u8 pad[3];
+ __u8 log2_blocks_per_bitmap_bit;
+ __u8 pad[2];
__u64 recalc_sector;
};
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
+#define SB_FLAG_DIRTY_BITMAP 0x4
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -151,9 +158,18 @@ struct dm_integrity_c {
struct workqueue_struct *metadata_wq;
struct superblock *sb;
unsigned journal_pages;
+ unsigned n_bitmap_blocks;
+
struct page_list *journal;
struct page_list *journal_io;
struct page_list *journal_xor;
+ struct page_list *recalc_bitmap;
+ struct page_list *may_write_bitmap;
+ struct bitmap_block_status *bbs;
+ unsigned bitmap_flush_interval;
+ int synchronous_mode;
+ struct bio_list synchronous_bios;
+ struct delayed_work bitmap_flush_work;
struct crypto_skcipher *journal_crypt;
struct scatterlist **journal_scatterlist;
@@ -180,6 +196,7 @@ struct dm_integrity_c {
__s8 log2_metadata_run;
__u8 log2_buffer_sectors;
__u8 sectors_per_block;
+ __u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
int suspending;
@@ -232,17 +249,20 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
+ bool recalculate_flag;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
struct alg_spec journal_mac_alg;
atomic64_t number_of_mismatches;
+
+ struct notifier_block reboot_notifier;
};
struct dm_integrity_range {
sector_t logical_sector;
- unsigned n_sectors;
+ sector_t n_sectors;
bool waiting;
union {
struct rb_node node;
@@ -288,6 +308,16 @@ struct journal_io {
struct journal_completion *comp;
};
+struct bitmap_block_status {
+ struct work_struct work;
+ struct dm_integrity_c *ic;
+ unsigned idx;
+ unsigned long *bitmap;
+ struct bio_list bio_queue;
+ spinlock_t bio_queue_lock;
+
+};
+
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL 32
@@ -423,7 +453,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
+ ic->sb->version = SB_VERSION_3;
+ else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic->sb->version = SB_VERSION_2;
else
ic->sb->version = SB_VERSION_1;
@@ -447,6 +479,137 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
return dm_io(&io_req, 1, &io_loc, NULL);
}
+#define BITMAP_OP_TEST_ALL_SET 0
+#define BITMAP_OP_TEST_ALL_CLEAR 1
+#define BITMAP_OP_SET 2
+#define BITMAP_OP_CLEAR 3
+
+static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
+ sector_t sector, sector_t n_sectors, int mode)
+{
+ unsigned long bit, end_bit, this_end_bit, page, end_page;
+ unsigned long *data;
+
+ if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
+ DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
+ (unsigned long long)sector,
+ (unsigned long long)n_sectors,
+ ic->sb->log2_sectors_per_block,
+ ic->log2_blocks_per_bitmap_bit,
+ mode);
+ BUG();
+ }
+
+ if (unlikely(!n_sectors))
+ return true;
+
+ bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ end_bit = (sector + n_sectors - 1) >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+
+ page = bit / (PAGE_SIZE * 8);
+ bit %= PAGE_SIZE * 8;
+
+ end_page = end_bit / (PAGE_SIZE * 8);
+ end_bit %= PAGE_SIZE * 8;
+
+repeat:
+ if (page < end_page) {
+ this_end_bit = PAGE_SIZE * 8 - 1;
+ } else {
+ this_end_bit = end_bit;
+ }
+
+ data = lowmem_page_address(bitmap[page].page);
+
+ if (mode == BITMAP_OP_TEST_ALL_SET) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ if (data[bit / BITS_PER_LONG] != -1)
+ return false;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ if (!test_bit(bit, data))
+ return false;
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ if (data[bit / BITS_PER_LONG] != 0)
+ return false;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ if (test_bit(bit, data))
+ return false;
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_SET) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = -1;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __set_bit(bit, data);
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_CLEAR) {
+ if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
+ clear_page(data);
+ else while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = 0;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __clear_bit(bit, data);
+ bit++;
+ }
+ } else {
+ BUG();
+ }
+
+ if (unlikely(page < end_page)) {
+ bit = 0;
+ page++;
+ goto repeat;
+ }
+
+ return true;
+}
+
+static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
+{
+ unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned i;
+
+ for (i = 0; i < n_bitmap_pages; i++) {
+ unsigned long *dst_data = lowmem_page_address(dst[i].page);
+ unsigned long *src_data = lowmem_page_address(src[i].page);
+ copy_page(dst_data, src_data);
+ }
+}
+
+static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
+{
+ unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+
+ BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
+ return &ic->bbs[bitmap_block];
+}
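+/*
+ * Worked example with illustrative defaults: for 4096-byte blocks
+ * log2_sectors_per_block == 3, and with DEFAULT_SECTORS_PER_BITMAP_BIT ==
+ * 32768 one bitmap bit covers 2^15 sectors (16 MiB), i.e.
+ * log2_blocks_per_bitmap_bit == 12.  Sector 100000 then maps to bit
+ * 100000 >> (3 + 12) == 3, which sits in bitmap page 3 / (PAGE_SIZE * 8)
+ * == 0 and in bitmap block 3 / (BITMAP_BLOCK_SIZE * 8) == 0.
+ */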
+
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
bool e, const char *function)
{
@@ -455,8 +618,8 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
if (unlikely(section >= ic->journal_sections) ||
unlikely(offset >= limit)) {
- printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
- function, section, offset, ic->journal_sections, limit);
+ DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
+ function, section, offset, ic->journal_sections, limit);
BUG();
}
#endif
@@ -756,12 +919,12 @@ static void complete_journal_io(unsigned long error, void *context)
complete_journal_op(comp);
}
-static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
+ unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
- unsigned sector, n_sectors, pl_index, pl_offset;
+ unsigned pl_index, pl_offset;
int r;
if (unlikely(dm_integrity_failed(ic))) {
@@ -770,9 +933,6 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
return;
}
- sector = section * ic->journal_section_sectors;
- n_sectors = n_sections * ic->journal_section_sectors;
-
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
@@ -805,6 +965,17 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
}
}
+static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
+ unsigned n_sections, struct journal_completion *comp)
+{
+ unsigned sector, n_sectors;
+
+ sector = section * ic->journal_section_sectors;
+ n_sectors = n_sections * ic->journal_section_sectors;
+
+ rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
+}
+
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
struct journal_completion io_comp;
@@ -988,6 +1159,12 @@ static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrit
} while (unlikely(new_range->waiting));
}
+static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
+{
+ if (unlikely(!add_new_range(ic, new_range, true)))
+ wait_and_add_new_range(ic, new_range);
+}
+
static void init_journal_node(struct journal_node *node)
{
RB_CLEAR_NODE(&node->node);
@@ -1204,6 +1381,14 @@ static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
int r = dm_integrity_failed(ic);
if (unlikely(r) && !bio->bi_status)
bio->bi_status = errno_to_blk_status(r);
+ if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
+ unsigned long flags;
+ spin_lock_irqsave(&ic->endio_wait.lock, flags);
+ bio_list_add(&ic->synchronous_bios, bio);
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+ return;
+ }
bio_endio(bio);
}
@@ -1477,7 +1662,8 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
else
wanted_tag_size *= ic->tag_size;
if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
- DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
+ DMERR("Invalid integrity data size %u, expected %u",
+ bip->bip_iter.bi_size, wanted_tag_size);
return DM_MAPIO_KILL;
}
}
@@ -1681,7 +1867,7 @@ retry:
unsigned ws, we, range_sectors;
dio->range.n_sectors = min(dio->range.n_sectors,
- ic->free_sectors << ic->sb->log2_sectors_per_block);
+ (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors)) {
if (from_map)
goto offload_to_thread;
@@ -1764,6 +1950,20 @@ offload_to_thread:
goto journal_read_write;
}
+ if (ic->mode == 'B' && dio->write) {
+ if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ struct bitmap_block_status *bbs;
+
+ bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
+ spin_lock(&bbs->bio_queue_lock);
+ bio_list_add(&bbs->bio_queue, bio);
+ spin_unlock(&bbs->bio_queue_lock);
+ queue_work(ic->writer_wq, &bbs->work);
+ return;
+ }
+ }
+
dio->in_flight = (atomic_t)ATOMIC_INIT(2);
if (need_sync_io) {
@@ -1790,10 +1990,15 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
- if (unlikely(ic->recalc_wq != NULL) &&
- ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
goto skip_check;
+ if (ic->mode == 'B') {
+ if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
+ goto skip_check;
+ }
+
if (likely(!bio->bi_status))
integrity_metadata(&dio->work);
else
@@ -1831,8 +2036,16 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
- WARN_ON(ic->journal_sections * ic->journal_section_entries !=
- (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
+ if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) *
+ ic->journal_section_entries + ic->free_sectors)) {
+ DMCRIT("journal_sections %u, journal_section_entries %u, "
+ "n_uncommitted_sections %u, n_committed_sections %u, "
+ "journal_section_entries %u, free_sectors %u",
+ ic->journal_sections, ic->journal_section_entries,
+ ic->n_uncommitted_sections, ic->n_committed_sections,
+ ic->journal_section_entries, ic->free_sectors);
+ }
}
static void integrity_commit(struct work_struct *w)
@@ -1981,8 +2194,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
spin_lock_irq(&ic->endio_wait.lock);
- if (unlikely(!add_new_range(ic, &io->range, true)))
- wait_and_add_new_range(ic, &io->range);
+ add_new_range_and_wait(ic, &io->range);
if (likely(!from_replay)) {
struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
@@ -2120,11 +2332,14 @@ static void integrity_recalc(struct work_struct *w)
sector_t area, offset;
sector_t metadata_block;
unsigned metadata_offset;
+ sector_t logical_sector, n_sectors;
__u8 *t;
unsigned i;
int r;
unsigned super_counter = 0;
+ DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
+
spin_lock_irq(&ic->endio_wait.lock);
next_chunk:
@@ -2133,21 +2348,49 @@ next_chunk:
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
- if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
+ if (ic->mode == 'B') {
+ DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ }
goto unlock_ret;
+ }
get_area_and_offset(ic, range.logical_sector, &area, &offset);
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
if (!ic->meta_dev)
- range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
-
- if (unlikely(!add_new_range(ic, &range, true)))
- wait_and_add_new_range(ic, &range);
+ range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+ add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
+ logical_sector = range.logical_sector;
+ n_sectors = range.n_sectors;
+
+ if (ic->mode == 'B') {
+ if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
+ goto advance_and_next;
+ }
+ while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
+ ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
+ logical_sector += ic->sectors_per_block;
+ n_sectors -= ic->sectors_per_block;
+ cond_resched();
+ }
+ while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
+ ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
+ n_sectors -= ic->sectors_per_block;
+ cond_resched();
+ }
+ get_area_and_offset(ic, logical_sector, &area, &offset);
+ }
+
+ DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
+ if (ic->mode == 'B') {
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+ }
super_counter = 0;
}
@@ -2162,7 +2405,7 @@ next_chunk:
io_req.client = ic->io;
io_loc.bdev = ic->dev->bdev;
io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = range.n_sectors;
+ io_loc.count = n_sectors;
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) {
@@ -2171,8 +2414,8 @@ next_chunk:
}
t = ic->recalc_tags;
- for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
t += ic->tag_size;
}
@@ -2184,6 +2427,9 @@ next_chunk:
goto err;
}
+advance_and_next:
+ cond_resched();
+
spin_lock_irq(&ic->endio_wait.lock);
remove_range_unlocked(ic, &range);
ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
@@ -2199,6 +2445,103 @@ unlock_ret:
recalc_write_super(ic);
}
+static void bitmap_block_work(struct work_struct *w)
+{
+ struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
+ struct dm_integrity_c *ic = bbs->ic;
+ struct bio *bio;
+ struct bio_list bio_queue;
+ struct bio_list waiting;
+
+ bio_list_init(&waiting);
+
+ spin_lock(&bbs->bio_queue_lock);
+ bio_queue = bbs->bio_queue;
+ bio_list_init(&bbs->bio_queue);
+ spin_unlock(&bbs->bio_queue_lock);
+
+ while ((bio = bio_list_pop(&bio_queue))) {
+ struct dm_integrity_io *dio;
+
+ dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
+
+ if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ } else {
+ block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+ bio_list_add(&waiting, bio);
+ }
+ }
+
+ if (bio_list_empty(&waiting))
+ return;
+
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
+ bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
+ BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
+
+ while ((bio = bio_list_pop(&waiting))) {
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
+
+ block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ }
+
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+}
+
+static void bitmap_flush_work(struct work_struct *work)
+{
+ struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
+ struct dm_integrity_range range;
+ unsigned long limit;
+ struct bio *bio;
+
+ dm_integrity_flush_buffers(ic);
+
+ range.logical_sector = 0;
+ range.n_sectors = ic->provided_data_sectors;
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ add_new_range_and_wait(ic, &range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ dm_integrity_flush_buffers(ic);
+ if (ic->meta_dev)
+ blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+
+ limit = ic->provided_data_sectors;
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ limit = le64_to_cpu(ic->sb->recalc_sector)
+ >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
+ << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ }
+ /*DEBUG_print("zeroing journal\n");*/
+ block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
+
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+ while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
+ bio_endio(bio);
+ spin_unlock_irq(&ic->endio_wait.lock);
+ spin_lock_irq(&ic->endio_wait.lock);
+ }
+ spin_unlock_irq(&ic->endio_wait.lock);
+}
+
+
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
unsigned n_sections, unsigned char commit_seq)
{
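
Editor's note: a small standalone example, not from the patch, of the shift idiom used in bitmap_flush_work() above. Shifting right and then left by the same amount rounds the recalculation position down to a whole bitmap-bit boundary, so only fully recalculated regions get their bits cleared; the field values below are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t recalc_sector = 1000000;         /* example recalc position, in sectors */
	unsigned log2_sectors_per_block = 3;      /* 4096-byte blocks, 512-byte sectors */
	unsigned log2_blocks_per_bitmap_bit = 12; /* one bit covers 2^12 blocks */
	unsigned k = log2_sectors_per_block + log2_blocks_per_bitmap_bit;

	uint64_t limit = recalc_sector >> k << k; /* round down to a bit boundary */

	printf("one bitmap bit covers %llu sectors\n",
	       (unsigned long long)1 << k);
	printf("clear bitmap bits only below sector %llu\n",
	       (unsigned long long)limit);
	return 0;
}
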
@@ -2395,9 +2738,37 @@ clear_journal:
init_journal_node(&ic->journal_tree[i]);
}
+static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
+{
+ DEBUG_print("dm_integrity_enter_synchronous_mode\n");
+
+ if (ic->mode == 'B') {
+ ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
+ ic->synchronous_mode = 1;
+
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ flush_workqueue(ic->commit_wq);
+ }
+}
+
+static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
+{
+ struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
+
+ DEBUG_print("dm_integrity_reboot\n");
+
+ dm_integrity_enter_synchronous_mode(ic);
+
+ return NOTIFY_DONE;
+}
+
static void dm_integrity_postsuspend(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ int r;
+
+ WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
del_timer_sync(&ic->autocommit_timer);
@@ -2406,6 +2777,9 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->recalc_wq)
drain_workqueue(ic->recalc_wq);
+ if (ic->mode == 'B')
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+
queue_work(ic->commit_wq, &ic->commit_work);
drain_workqueue(ic->commit_wq);
@@ -2416,6 +2790,18 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
dm_integrity_flush_buffers(ic);
}
+ if (ic->mode == 'B') {
+ dm_integrity_flush_buffers(ic);
+#if 1
+ /* set to 0 to test bitmap replay code */
+ init_journal(ic, 0, ic->journal_sections, 0);
+ ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+#endif
+ }
+
WRITE_ONCE(ic->suspending, 0);
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
@@ -2426,11 +2812,70 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
static void dm_integrity_resume(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ int r;
+ DEBUG_print("resume\n");
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
+ DEBUG_print("resume dirty_bitmap\n");
+ rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ if (ic->mode == 'B') {
+ if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
+ block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
+ if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
+ BITMAP_OP_TEST_ALL_CLEAR)) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ } else {
+ DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
+ ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ } else {
+ if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ init_journal(ic, 0, ic->journal_sections, 0);
+ replay_journal(ic);
+ ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ }
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+ } else {
+ replay_journal(ic);
+ if (ic->mode == 'B') {
+ int mode;
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+
+ mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ }
+ }
- replay_journal(ic);
-
- if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ DEBUG_print("testing recalc: %x\n", ic->sb->flags);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+ DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
} else if (recalc_pos > ic->provided_data_sectors) {
@@ -2438,6 +2883,16 @@ static void dm_integrity_resume(struct dm_target *ti)
recalc_write_super(ic);
}
}
+
+ ic->reboot_notifier.notifier_call = dm_integrity_reboot;
+ ic->reboot_notifier.next = NULL;
+ ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
+ WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
+
+#if 0
+ /* set to 1 to stress test synchronous mode */
+ dm_integrity_enter_synchronous_mode(ic);
+#endif
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
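
Editor's note: a hedged skeleton of the reboot-notifier pattern that dm_integrity_resume() sets up above; the structure and function names here are invented. Registering with a priority just below INT_MAX lets the target flush and switch to synchronous bitmap mode before lower-priority hardware shutdown notifiers run.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>

struct my_target {
	struct notifier_block reboot_notifier;
	bool synchronous_mode;
};

static int my_target_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	struct my_target *t = container_of(n, struct my_target, reboot_notifier);

	/* flush outstanding metadata and stop batching before the machine goes down */
	t->synchronous_mode = true;
	return NOTIFY_DONE;
}

static void my_target_arm_reboot_hook(struct my_target *t)
{
	t->reboot_notifier.notifier_call = my_target_reboot;
	t->reboot_notifier.priority = INT_MAX - 1;	/* after md, before hardware drivers */
	register_reboot_notifier(&t->reboot_notifier);	/* the patch checks this with WARN_ON */
	/* balanced by unregister_reboot_notifier() in postsuspend */
}
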
@@ -2462,10 +2917,14 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
watermark_percentage += ic->journal_entries / 2;
do_div(watermark_percentage, ic->journal_entries);
- arg_count = 5;
+ arg_count = 3;
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->mode == 'J';
+ arg_count += ic->mode == 'J';
+ arg_count += ic->mode == 'B';
+ arg_count += ic->mode == 'B';
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -2475,13 +2934,19 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" meta_device:%s", ic->meta_dev->name);
if (ic->sectors_per_block != 1)
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ if (ic->recalculate_flag)
DMEMIT(" recalculate");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
- DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
- DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ if (ic->mode == 'J') {
+ DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
+ DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ }
+ if (ic->mode == 'B') {
+ DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+ DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
+ }
#define EMIT_ALG(a, n) \
do { \
@@ -2562,7 +3027,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
return -EINVAL;
} else {
- __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
+ __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
meta_size <<= ic->log2_buffer_sectors;
@@ -2659,37 +3124,37 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
-static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
+static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned i;
if (!pl)
return;
- for (i = 0; i < ic->journal_pages; i++)
- if (pl[i].page)
- __free_page(pl[i].page);
+ for (i = 0; pl[i].page; i++)
+ __free_page(pl[i].page);
kvfree(pl);
}
-static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
+static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
- size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
struct page_list *pl;
unsigned i;
- pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
+ pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
if (!pl)
return NULL;
- for (i = 0; i < ic->journal_pages; i++) {
+ for (i = 0; i < n_pages; i++) {
pl[i].page = alloc_page(GFP_KERNEL);
if (!pl[i].page) {
- dm_integrity_free_page_list(ic, pl);
+ dm_integrity_free_page_list(pl);
return NULL;
}
if (i)
pl[i - 1].next = &pl[i];
}
+ pl[i].page = NULL;
+ pl[i].next = NULL;
return pl;
}
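
Editor's note: the allocator above now takes an explicit page count and appends a NULL sentinel, which is why dm_integrity_free_page_list() no longer needs the dm_integrity_c context. A userspace sketch of the same sentinel-terminated pattern, with hypothetical names:

#include <stdlib.h>

struct page_list {
	void *page;
	struct page_list *next;
};

/* Allocate n entries plus a NULL sentinel, chained like the kernel version. */
struct page_list *alloc_page_list(unsigned n)
{
	struct page_list *pl = calloc(n + 1, sizeof(*pl));
	unsigned i;

	if (!pl)
		return NULL;
	for (i = 0; i < n; i++) {
		pl[i].page = malloc(4096);
		if (!pl[i].page)
			goto fail;
		if (i)
			pl[i - 1].next = &pl[i];
	}
	return pl;		/* pl[n].page == NULL terminates the list */
fail:
	while (i--)
		free(pl[i].page);
	free(pl);
	return NULL;
}

/* The free path needs no element count: it stops at the sentinel. */
void free_page_list(struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		free(pl[i].page);
	free(pl);
}

Because the free path walks to the sentinel, the same helper can release page lists of different sizes, here the journal pages as well as the two bitmaps.
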
@@ -2702,7 +3167,8 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
kvfree(sl);
}
-static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
+static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
+ struct page_list *pl)
{
struct scatterlist **sl;
unsigned i;
@@ -2721,7 +3187,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
unsigned idx;
page_list_location(ic, i, 0, &start_index, &start_offset);
- page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
+ page_list_location(ic, i, ic->journal_section_sectors - 1,
+ &end_index, &end_offset);
n_pages = (end_index - start_index + 1);
@@ -2842,7 +3309,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
}
ic->journal_pages = journal_pages;
- ic->journal = dm_integrity_alloc_page_list(ic);
+ ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal) {
*error = "Could not allocate memory for journal";
r = -ENOMEM;
@@ -2874,7 +3341,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
DEBUG_print("cipher %s, block size %u iv size %u\n",
ic->journal_crypt_alg.alg_string, blocksize, ivsize);
- ic->journal_io = dm_integrity_alloc_page_list(ic);
+ ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal_io) {
*error = "Could not allocate memory for journal io";
r = -ENOMEM;
@@ -2898,7 +3365,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
- ic->journal_xor = dm_integrity_alloc_page_list(ic);
+ ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal_xor) {
*error = "Could not allocate memory for journal xor";
r = -ENOMEM;
@@ -2922,7 +3389,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
memset(crypt_iv, 0x00, ivsize);
- skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
+ skcipher_request_set_crypt(req, sg, sg,
+ PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -3063,7 +3531,7 @@ bad:
* device
* offset from the start of the device
* tag size
- * D - direct writes, J - journal writes, R - recovery mode
+ * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
* number of optional arguments
* optional arguments:
* journal_sectors
@@ -3071,10 +3539,14 @@ bad:
* buffer_sectors
* journal_watermark
* commit_time
+ * meta_device
+ * block_size
+ * sectors_per_bit
+ * bitmap_flush_interval
* internal_hash
* journal_crypt
* journal_mac
- * block_size
+ * recalculate
*/
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
@@ -3087,10 +3559,13 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{0, 9, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
- bool recalculate;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
+ __s8 log2_sectors_per_bitmap_bit = -1;
+ __s8 log2_blocks_per_bitmap_bit;
+ __u64 bits_in_journal;
+ __u64 n_bitmap_bits;
#define DIRECT_ARGUMENTS 4
@@ -3114,6 +3589,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
init_waitqueue_head(&ic->copy_to_journal_wait);
init_completion(&ic->crypto_backoff);
atomic64_set(&ic->number_of_mismatches, 0);
+ ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
if (r) {
@@ -3136,10 +3612,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
- if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
+ if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
+ !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
ic->mode = argv[3][0];
- else {
- ti->error = "Invalid mode (expecting J, D, R)";
+ } else {
+ ti->error = "Invalid mode (expecting J, B, D, R)";
r = -EINVAL;
goto bad;
}
@@ -3149,7 +3626,6 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
buffer_sectors = DEFAULT_BUFFER_SECTORS;
journal_watermark = DEFAULT_JOURNAL_WATERMARK;
sync_msec = DEFAULT_SYNC_MSEC;
- recalculate = false;
ic->sectors_per_block = 1;
as.argc = argc - DIRECT_ARGUMENTS;
@@ -3161,6 +3637,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (extra_args--) {
const char *opt_string;
unsigned val;
+ unsigned long long llval;
opt_string = dm_shift_arg(&as);
if (!opt_string) {
r = -EINVAL;
@@ -3182,7 +3659,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
}
- r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
+ r = dm_get_device(ti, strchr(opt_string, ':') + 1,
+ dm_table_get_mode(ti->table), &ic->meta_dev);
if (r) {
ti->error = "Device lookup failed";
goto bad;
@@ -3196,6 +3674,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
+ } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
+ log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
+ } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
+ if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ r = -EINVAL;
+ ti->error = "Invalid bitmap_flush_interval argument";
+ }
+ ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
@@ -3212,7 +3698,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
- recalculate = true;
+ ic->recalculate_flag = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3228,7 +3714,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (!journal_sectors) {
journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
- ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
+ ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
}
if (!buffer_sectors)
@@ -3263,6 +3749,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
else
ic->log2_tag_size = -1;
+ if (ic->mode == 'B' && !ic->internal_hash) {
+ r = -EINVAL;
+ ti->error = "Bitmap mode can be only used with internal hash";
+ goto bad;
+ }
+
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
@@ -3308,7 +3800,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
INIT_WORK(&ic->commit_work, integrity_commit);
- if (ic->mode == 'J') {
+ if (ic->mode == 'J' || ic->mode == 'B') {
ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
if (!ic->writer_wq) {
ti->error = "Cannot allocate workqueue";
@@ -3349,7 +3841,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -3409,6 +3901,27 @@ try_smaller_buffer:
ti->error = "The device is too small";
goto bad;
}
+
+ if (log2_sectors_per_bitmap_bit < 0)
+ log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
+ if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
+ log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
+
+ bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
+ if (bits_in_journal > UINT_MAX)
+ bits_in_journal = UINT_MAX;
+ while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
+ log2_sectors_per_bitmap_bit++;
+
+ log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
+ ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
+ if (should_write_sb) {
+ ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
+ }
+ n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
+ + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
+ ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
+
if (!ic->meta_dev)
ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
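
Editor's note: a standalone arithmetic sketch, not from the patch, mirroring the sizing logic above: the region covered by one bitmap bit is grown until the whole bitmap fits into the journal area, then the number of bits and of on-disk bitmap blocks is derived. The 4096-byte BITMAP_BLOCK_SIZE and the example geometry are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define BITMAP_BLOCK_SIZE 4096	/* assumed; defined earlier in the full patch */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t provided_data_sectors = 1ULL << 31;	/* 1 TiB of 512-byte sectors */
	unsigned log2_sectors_per_block = 3;		/* 4 KiB blocks */
	unsigned log2_sectors_per_bitmap_bit = 15;	/* start at 16 MiB per bit */
	uint64_t bits_in_journal = 8ULL << 20;		/* example journal-area capacity in bits */

	/* Grow the region covered by one bit until the bitmap fits in the journal area. */
	while (bits_in_journal <
	       (provided_data_sectors + (1ULL << log2_sectors_per_bitmap_bit) - 1)
	        >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;

	unsigned log2_blocks_per_bitmap_bit =
		log2_sectors_per_bitmap_bit - log2_sectors_per_block;

	uint64_t n_bitmap_bits =
		((provided_data_sectors >> log2_sectors_per_block) +
		 (1ULL << log2_blocks_per_bitmap_bit) - 1) >> log2_blocks_per_bitmap_bit;
	uint64_t n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);

	printf("sectors per bit: %llu\n",
	       (unsigned long long)1 << log2_sectors_per_bitmap_bit);
	printf("bitmap bits: %llu, on-disk bitmap blocks: %llu\n",
	       (unsigned long long)n_bitmap_bits, (unsigned long long)n_bitmap_blocks);
	return 0;
}
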
@@ -3433,25 +3946,21 @@ try_smaller_buffer:
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
- DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
+ DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
(unsigned long long)ic->provided_data_sectors);
DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
+ DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
- if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
+ if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
}
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
- if (!ic->internal_hash) {
- r = -EINVAL;
- ti->error = "Recalculate is only valid with internal hash";
- goto bad;
- }
+ if (ic->internal_hash) {
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -3488,6 +3997,45 @@ try_smaller_buffer:
r = create_journal(ic, &ti->error);
if (r)
goto bad;
+
+ }
+
+ if (ic->mode == 'B') {
+ unsigned i;
+ unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+
+ ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ if (!ic->recalc_bitmap) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ if (!ic->may_write_bitmap) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
+ if (!ic->bbs) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
+ for (i = 0; i < ic->n_bitmap_blocks; i++) {
+ struct bitmap_block_status *bbs = &ic->bbs[i];
+ unsigned sector, pl_index, pl_offset;
+
+ INIT_WORK(&bbs->work, bitmap_block_work);
+ bbs->ic = ic;
+ bbs->idx = i;
+ bio_list_init(&bbs->bio_queue);
+ spin_lock_init(&bbs->bio_queue_lock);
+
+ sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
+ pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
+ pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
+
+ bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
+ }
}
if (should_write_sb) {
@@ -3512,6 +4060,17 @@ try_smaller_buffer:
if (r)
goto bad;
}
+ if (ic->mode == 'B') {
+ unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ if (!max_io_len)
+ max_io_len = 1U << 31;
+ DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
+ if (!ti->max_io_len || ti->max_io_len > max_io_len) {
+ r = dm_set_target_max_io_len(ti, max_io_len);
+ if (r)
+ goto bad;
+ }
+ }
if (!ic->internal_hash)
dm_integrity_set(ti, ic);
@@ -3520,6 +4079,7 @@ try_smaller_buffer:
ti->flush_supported = true;
return 0;
+
bad:
dm_integrity_dtr(ti);
return r;
@@ -3542,10 +4102,9 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->writer_wq);
if (ic->recalc_wq)
destroy_workqueue(ic->recalc_wq);
- if (ic->recalc_buffer)
- vfree(ic->recalc_buffer);
- if (ic->recalc_tags)
- kvfree(ic->recalc_tags);
+ vfree(ic->recalc_buffer);
+ kvfree(ic->recalc_tags);
+ kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
mempool_exit(&ic->journal_io_mempool);
@@ -3555,9 +4114,11 @@ static void dm_integrity_dtr(struct dm_target *ti)
dm_put_device(ti, ic->dev);
if (ic->meta_dev)
dm_put_device(ti, ic->meta_dev);
- dm_integrity_free_page_list(ic, ic->journal);
- dm_integrity_free_page_list(ic, ic->journal_io);
- dm_integrity_free_page_list(ic, ic->journal_xor);
+ dm_integrity_free_page_list(ic->journal);
+ dm_integrity_free_page_list(ic->journal_io);
+ dm_integrity_free_page_list(ic->journal_xor);
+ dm_integrity_free_page_list(ic->recalc_bitmap);
+ dm_integrity_free_page_list(ic->may_write_bitmap);
if (ic->journal_scatterlist)
dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
if (ic->journal_io_scatterlist)
@@ -3595,7 +4156,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c740153b4e52..1e03bc89e20f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -2069,7 +2069,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
/* alloc table */
r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
if (r)
- goto err_destroy_dm;
+ goto err_hash_remove;
/* add targets */
for (i = 0; i < dmi->target_count; i++) {
@@ -2116,6 +2116,10 @@ int __init dm_early_create(struct dm_ioctl *dmi,
err_destroy_table:
dm_table_destroy(t);
+err_hash_remove:
+ (void) __hash_remove(__get_name_cell(dmi->name));
+ /* release reference from __get_name_cell */
+ dm_put(md);
err_destroy_dm:
dm_put(md);
dm_destroy(md);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2ee5e357a0a7..dbcc1e41cd57 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -544,8 +544,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_REMAPPED;
}
-static void multipath_release_clone(struct request *clone)
+static void multipath_release_clone(struct request *clone,
+ union map_info *map_context)
{
+ if (unlikely(map_context)) {
+ /*
+ * A non-NULL map_context means the caller is still in the map
+ * method, so multipath_clone_and_map() must be undone.
+ */
+ struct dm_mpath_io *mpio = get_mpio(map_context);
+ struct pgpath *pgpath = mpio->pgpath;
+
+ if (pgpath && pgpath->pg->ps.type->end_io)
+ pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
+ &pgpath->path,
+ mpio->nr_bytes);
+ }
+
blk_put_request(clone);
}
@@ -882,6 +897,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
+ kfree(attached_handler_name);
if (r) {
dm_put_device(ti, p->path.dev);
goto bad;
@@ -896,7 +912,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
return p;
bad:
- kfree(attached_handler_name);
free_pgpath(p);
return ERR_PTR(r);
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b66745bd08bb..5f7063f05ae0 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -168,7 +168,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
struct request *rq = tio->orig;
blk_rq_unprep_clone(clone);
- tio->ti->type->release_clone_rq(clone);
+ tio->ti->type->release_clone_rq(clone, NULL);
rq_end_stats(md, rq);
blk_mq_end_request(rq, error);
@@ -201,7 +201,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
rq_end_stats(md, rq);
if (tio->clone) {
blk_rq_unprep_clone(tio->clone);
- tio->ti->type->release_clone_rq(tio->clone);
+ tio->ti->type->release_clone_rq(tio->clone, NULL);
}
dm_mq_delay_requeue_request(rq, delay_ms);
@@ -398,7 +398,7 @@ static int map_request(struct dm_rq_target_io *tio)
case DM_MAPIO_REMAPPED:
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
- ti->type->release_clone_rq(clone);
+ ti->type->release_clone_rq(clone, &tio->info);
return DM_MAPIO_REQUEUE;
}
@@ -408,7 +408,7 @@ static int map_request(struct dm_rq_target_io *tio)
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
blk_rq_unprep_clone(clone);
- tio->ti->type->release_clone_rq(clone);
+ tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a168963b757d..3107f2b1988b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -44,11 +45,11 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
- struct list_head *table;
+ struct hlist_bl_head *table;
};
struct dm_snapshot {
- struct mutex lock;
+ struct rw_semaphore lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -76,7 +77,9 @@ struct dm_snapshot {
atomic_t pending_exceptions_count;
- /* Protected by "lock" */
+ spinlock_t pe_allocation_lock;
+
+ /* Protected by "pe_allocation_lock" */
sector_t exception_start_sequence;
/* Protected by kcopyd single-threaded callback */
@@ -457,9 +460,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
active = s->active;
- mutex_unlock(&s->lock);
+ up_read(&s->lock);
if (active) {
if (snap_src)
@@ -618,6 +621,36 @@ static void unregister_snapshot(struct dm_snapshot *s)
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
+
+/* Lock to protect access to the completed and pending exception hash tables. */
+struct dm_exception_table_lock {
+ struct hlist_bl_head *complete_slot;
+ struct hlist_bl_head *pending_slot;
+};
+
+static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
+ struct dm_exception_table_lock *lock)
+{
+ struct dm_exception_table *complete = &s->complete;
+ struct dm_exception_table *pending = &s->pending;
+
+ lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
+ lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+}
+
+static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_lock(lock->complete_slot);
+ hlist_bl_lock(lock->pending_slot);
+}
+
+static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_unlock(lock->pending_slot);
+ hlist_bl_unlock(lock->complete_slot);
+}
+
static int dm_exception_table_init(struct dm_exception_table *et,
uint32_t size, unsigned hash_shift)
{
@@ -625,12 +658,12 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct list_head));
+ et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
if (!et->table)
return -ENOMEM;
for (i = 0; i < size; i++)
- INIT_LIST_HEAD(et->table + i);
+ INIT_HLIST_BL_HEAD(et->table + i);
return 0;
}
@@ -638,15 +671,16 @@ static int dm_exception_table_init(struct dm_exception_table *et,
static void dm_exception_table_exit(struct dm_exception_table *et,
struct kmem_cache *mem)
{
- struct list_head *slot;
- struct dm_exception *ex, *next;
+ struct hlist_bl_head *slot;
+ struct dm_exception *ex;
+ struct hlist_bl_node *pos, *n;
int i, size;
size = et->hash_mask + 1;
for (i = 0; i < size; i++) {
slot = et->table + i;
- list_for_each_entry_safe (ex, next, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
kmem_cache_free(mem, ex);
}
@@ -660,7 +694,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
static void dm_remove_exception(struct dm_exception *e)
{
- list_del(&e->hash_list);
+ hlist_bl_del(&e->hash_list);
}
/*
@@ -670,11 +704,12 @@ static void dm_remove_exception(struct dm_exception *e)
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
chunk_t chunk)
{
- struct list_head *slot;
+ struct hlist_bl_head *slot;
+ struct hlist_bl_node *pos;
struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
- list_for_each_entry (e, slot, hash_list)
+ hlist_bl_for_each_entry(e, pos, slot, hash_list)
if (chunk >= e->old_chunk &&
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
@@ -721,7 +756,8 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
static void dm_insert_exception(struct dm_exception_table *eh,
struct dm_exception *new_e)
{
- struct list_head *l;
+ struct hlist_bl_head *l;
+ struct hlist_bl_node *pos;
struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -731,7 +767,7 @@ static void dm_insert_exception(struct dm_exception_table *eh,
goto out;
/* List is ordered by old_chunk */
- list_for_each_entry_reverse(e, l, hash_list) {
+ hlist_bl_for_each_entry(e, pos, l, hash_list) {
/* Insert after an existing chunk? */
if (new_e->old_chunk == (e->old_chunk +
dm_consecutive_chunk_count(e) + 1) &&
@@ -752,12 +788,24 @@ static void dm_insert_exception(struct dm_exception_table *eh,
return;
}
- if (new_e->old_chunk > e->old_chunk)
+ if (new_e->old_chunk < e->old_chunk)
break;
}
out:
- list_add(&new_e->hash_list, e ? &e->hash_list : l);
+ if (!e) {
+ /*
+ * Either the table doesn't support consecutive chunks or slot
+ * l is empty.
+ */
+ hlist_bl_add_head(&new_e->hash_list, l);
+ } else if (new_e->old_chunk < e->old_chunk) {
+ /* Add before an existing exception */
+ hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+ } else {
+ /* Add to l's tail: e is the last exception in this slot */
+ hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+ }
}
/*
@@ -766,6 +814,7 @@ out:
*/
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
+ struct dm_exception_table_lock lock;
struct dm_snapshot *s = context;
struct dm_exception *e;
@@ -778,7 +827,17 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
+ /*
+ * Although there is no need to lock access to the exception tables
+ * here, if we don't then hlist_bl_add_head(), called by
+ * dm_insert_exception(), will complain about accessing the
+ * corresponding list without locking it first.
+ */
+ dm_exception_table_lock_init(s, old, &lock);
+
+ dm_exception_table_lock(&lock);
dm_insert_exception(&s->complete, e);
+ dm_exception_table_unlock(&lock);
return 0;
}
@@ -807,7 +866,7 @@ static int calc_max_buckets(void)
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
- mem /= sizeof(struct list_head);
+ mem /= sizeof(struct hlist_bl_head);
return mem;
}
@@ -927,7 +986,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -942,7 +1001,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
if (b)
flush_bios(b);
@@ -1001,9 +1060,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
goto shut;
}
@@ -1044,10 +1103,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1089,10 +1148,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1188,10 +1247,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ spin_lock_init(&s->pe_allocation_lock);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
s->out_of_order_tree = RB_ROOT;
- mutex_init(&s->lock);
+ init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1357,9 +1417,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- mutex_lock(&snap_dest->lock);
+ down_write(&snap_dest->lock);
snap_dest->valid = 0;
- mutex_unlock(&snap_dest->lock);
+ up_write(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1390,8 +1450,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- mutex_destroy(&s->lock);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1467,6 +1525,13 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
dm_table_event(s->ti->table);
}
+static void invalidate_snapshot(struct dm_snapshot *s, int err)
+{
+ down_write(&s->lock);
+ __invalidate_snapshot(s, err);
+ up_write(&s->lock);
+}
+
static void pending_complete(void *context, int success)
{
struct dm_snap_pending_exception *pe = context;
@@ -1475,43 +1540,63 @@ static void pending_complete(void *context, int success)
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
struct bio *full_bio = NULL;
+ struct dm_exception_table_lock lock;
int error = 0;
+ dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
+
if (!success) {
/* Read/write error - snapshot is unusable */
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -EIO);
+ invalidate_snapshot(s, -EIO);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -ENOMEM);
+ invalidate_snapshot(s, -ENOMEM);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
*e = pe->e;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid) {
+ up_read(&s->lock);
free_completed_exception(e);
error = 1;
+
goto out;
}
- /* Check for conflicting reads */
- __check_for_conflicting_io(s, pe->e.old_chunk);
-
/*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
+ * Add a proper exception. After inserting the completed exception all
+ * subsequent snapshot reads to this chunk will be redirected to the
+ * COW device. This ensures that we do not starve. Moreover, as long
+ * as the pending exception exists, neither origin writes nor snapshot
+ * merging can overwrite the chunk in origin.
*/
dm_insert_exception(&s->complete, e);
+ up_read(&s->lock);
+
+ /* Wait for conflicting reads to drain */
+ if (__chunk_is_tracked(s, pe->e.old_chunk)) {
+ dm_exception_table_unlock(&lock);
+ __check_for_conflicting_io(s, pe->e.old_chunk);
+ dm_exception_table_lock(&lock);
+ }
out:
+ /* Remove the in-flight exception from the list */
dm_remove_exception(&pe->e);
+
+ dm_exception_table_unlock(&lock);
+
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
full_bio = pe->full_bio;
@@ -1519,8 +1604,6 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- mutex_unlock(&s->lock);
-
/* Submit any pending write bios */
if (error) {
if (full_bio)
@@ -1660,43 +1743,59 @@ __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
}
/*
- * Looks to see if this snapshot already has a pending exception
- * for this chunk, otherwise it allocates a new one and inserts
- * it into the pending table.
+ * Inserts a pending exception into the pending table.
*
- * NOTE: a write lock must be held on snap->lock before calling
- * this.
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
*/
static struct dm_snap_pending_exception *
-__find_pending_exception(struct dm_snapshot *s,
- struct dm_snap_pending_exception *pe, chunk_t chunk)
+__insert_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
{
- struct dm_snap_pending_exception *pe2;
-
- pe2 = __lookup_pending_exception(s, chunk);
- if (pe2) {
- free_pending_exception(pe);
- return pe2;
- }
-
pe->e.old_chunk = chunk;
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
pe->started = 0;
pe->full_bio = NULL;
+ spin_lock(&s->pe_allocation_lock);
if (s->store->type->prepare_exception(s->store, &pe->e)) {
+ spin_unlock(&s->pe_allocation_lock);
free_pending_exception(pe);
return NULL;
}
pe->exception_sequence = s->exception_start_sequence++;
+ spin_unlock(&s->pe_allocation_lock);
dm_insert_exception(&s->pending, &pe->e);
return pe;
}
+/*
+ * Looks to see if this snapshot already has a pending exception
+ * for this chunk, otherwise it allocates a new one and inserts
+ * it into the pending table.
+ *
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
+ */
+static struct dm_snap_pending_exception *
+__find_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
+{
+ struct dm_snap_pending_exception *pe2;
+
+ pe2 = __lookup_pending_exception(s, chunk);
+ if (pe2) {
+ free_pending_exception(pe);
+ return pe2;
+ }
+
+ return __insert_pending_exception(s, pe, chunk);
+}
+
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
@@ -1714,6 +1813,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ struct dm_exception_table_lock lock;
init_tracked_chunk(bio);
@@ -1723,13 +1823,15 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+ dm_exception_table_lock_init(s, chunk, &lock);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
if (!s->valid)
return DM_MAPIO_KILL;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1752,15 +1854,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
pe = alloc_pending_exception(s);
- mutex_lock(&s->lock);
-
- if (!s->valid || s->snapshot_overflowed) {
- free_pending_exception(pe);
- r = DM_MAPIO_KILL;
- goto out_unlock;
- }
+ dm_exception_table_lock(&lock);
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
@@ -1771,13 +1867,22 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
+ down_write(&s->lock);
+
if (s->store->userspace_supports_overflow) {
- s->snapshot_overflowed = 1;
- DMERR("Snapshot overflowed: Unable to allocate exception.");
+ if (s->valid && !s->snapshot_overflowed) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ }
} else
__invalidate_snapshot(s, -ENOMEM);
+ up_write(&s->lock);
+
r = DM_MAPIO_KILL;
- goto out_unlock;
+ goto out;
}
}
@@ -1789,7 +1894,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_full_bio(pe, bio);
goto out;
}
@@ -1797,9 +1905,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio_list_add(&pe->snapshot_bios, bio);
if (!pe->started) {
- /* this is protected by snap->lock */
+ /* this is protected by the exception table lock */
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_copy(pe);
goto out;
}
@@ -1809,7 +1920,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
out:
return r;
}
@@ -1845,7 +1957,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1876,12 +1988,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return r;
}
@@ -1913,7 +2025,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
+ down_read(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1923,7 +2035,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- mutex_unlock(&snap_src->lock);
+ up_read(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1969,11 +2081,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
- mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- mutex_unlock(&snap_dest->lock);
- mutex_unlock(&snap_src->lock);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1988,9 +2100,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->active = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2030,7 +2142,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- mutex_lock(&snap->lock);
+ down_write(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2055,7 +2167,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- mutex_unlock(&snap->lock);
+ up_write(&snap->lock);
break;
@@ -2107,9 +2219,10 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
struct dm_exception *e;
- struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe, *pe2;
struct dm_snap_pending_exception *pe_to_start_now = NULL;
struct dm_snap_pending_exception *pe_to_start_last = NULL;
+ struct dm_exception_table_lock lock;
chunk_t chunk;
/* Do all the snapshots on this origin */
@@ -2121,52 +2234,59 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- mutex_lock(&snap->lock);
-
- /* Only deal with valid and active snapshots */
- if (!snap->valid || !snap->active)
- goto next_snapshot;
-
/* Nothing to do if writing beyond end of snapshot */
if (sector >= dm_table_get_size(snap->ti->table))
- goto next_snapshot;
+ continue;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
chunk = sector_to_chunk(snap->store, sector);
+ dm_exception_table_lock_init(snap, chunk, &lock);
- /*
- * Check exception table to see if block
- * is already remapped in this snapshot
- * and trigger an exception if not.
- */
- e = dm_lookup_exception(&snap->complete, chunk);
- if (e)
+ down_read(&snap->lock);
+ dm_exception_table_lock(&lock);
+
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
goto next_snapshot;
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- mutex_unlock(&snap->lock);
- pe = alloc_pending_exception(snap);
- mutex_lock(&snap->lock);
-
- if (!snap->valid) {
- free_pending_exception(pe);
- goto next_snapshot;
- }
-
+ /*
+ * Check exception table to see if block is already
+ * remapped in this snapshot and trigger an exception
+ * if not.
+ */
e = dm_lookup_exception(&snap->complete, chunk);
- if (e) {
- free_pending_exception(pe);
+ if (e)
goto next_snapshot;
- }
- pe = __find_pending_exception(snap, pe, chunk);
- if (!pe) {
- __invalidate_snapshot(snap, -ENOMEM);
- goto next_snapshot;
+ dm_exception_table_unlock(&lock);
+ pe = alloc_pending_exception(snap);
+ dm_exception_table_lock(&lock);
+
+ pe2 = __lookup_pending_exception(snap, chunk);
+
+ if (!pe2) {
+ e = dm_lookup_exception(&snap->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ pe = __insert_pending_exception(snap, pe, chunk);
+ if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
+
+ invalidate_snapshot(snap, -ENOMEM);
+ continue;
+ }
+ } else {
+ free_pending_exception(pe);
+ pe = pe2;
}
}
@@ -2193,7 +2313,8 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- mutex_unlock(&snap->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
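
Editor's note: a hedged sketch, with invented names, of the per-bucket locking scheme dm-snap moves to above. Each exception hash bucket is an hlist_bl_head whose low bit doubles as a spinlock, so lookups and inserts for unrelated chunks no longer serialize on one snapshot-wide lock; the real code additionally takes both the completed and the pending slot for a chunk through dm_exception_table_lock().

#include <linux/list_bl.h>

struct demo_exception {
	unsigned long chunk;
	struct hlist_bl_node hash_list;
};

#define DEMO_HASH_SIZE 64
static struct hlist_bl_head demo_table[DEMO_HASH_SIZE];

static struct hlist_bl_head *demo_slot(unsigned long chunk)
{
	return &demo_table[chunk % DEMO_HASH_SIZE];
}

static void demo_insert(struct demo_exception *e)
{
	struct hlist_bl_head *slot = demo_slot(e->chunk);

	hlist_bl_lock(slot);		/* bit spinlock on the bucket head */
	hlist_bl_add_head(&e->hash_list, slot);
	hlist_bl_unlock(slot);
}

static struct demo_exception *demo_lookup(unsigned long chunk)
{
	struct hlist_bl_head *slot = demo_slot(chunk);
	struct hlist_bl_node *pos;
	struct demo_exception *e;

	hlist_bl_lock(slot);
	hlist_bl_for_each_entry(e, pos, slot, hash_list) {
		if (e->chunk == chunk) {
			hlist_bl_unlock(slot);
			return e;
		}
	}
	hlist_bl_unlock(slot);
	return NULL;
}
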
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 314d17ca6466..64dd0b34fcf4 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
return DM_MAPIO_KILL;
}
-static void io_err_release_clone_rq(struct request *clone)
+static void io_err_release_clone_rq(struct request *clone,
+ union map_info *map_context)
{
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index ed3caceaed07..7f0840601737 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -202,6 +202,13 @@ struct dm_pool_metadata {
bool fail_io:1;
/*
+ * Set once a thin-pool has been accessed through one of the interfaces
+ * that imply the pool is in-service (e.g. thin devices created/deleted,
+ * thin-pool message, metadata snapshots, etc).
+ */
+ bool in_service:1;
+
+ /*
* Reading the space map roots can fail, so we read it into these
* buffers before the superblock is locked and updated.
*/
@@ -367,6 +374,32 @@ static int subtree_equal(void *context, const void *value1_le, const void *value
/*----------------------------------------------------------------*/
+/*
+ * Variant that is used for in-core only changes or code that
+ * shouldn't put the pool in service on its own (e.g. commit).
+ */
+static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
+ __acquires(pmd->root_lock)
+{
+ down_write(&pmd->root_lock);
+}
+#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
+
+static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
+{
+ __pmd_write_lock(pmd);
+ if (unlikely(!pmd->in_service))
+ pmd->in_service = true;
+}
+
+static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
+ __releases(pmd->root_lock)
+{
+ up_write(&pmd->root_lock);
+}
+
+/*----------------------------------------------------------------*/
+
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
struct dm_block **sblock)
{
@@ -790,6 +823,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
*/
BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+ if (unlikely(!pmd->in_service))
+ return 0;
+
r = __write_changed_details(pmd);
if (r < 0)
return r;
@@ -853,6 +889,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
pmd->time = 0;
INIT_LIST_HEAD(&pmd->thin_devices);
pmd->fail_io = false;
+ pmd->in_service = false;
pmd->bdev = bdev;
pmd->data_block_size = data_block_size;
@@ -903,7 +940,6 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
}
-
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
@@ -1032,10 +1068,10 @@ int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_thin(pmd, dev);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1123,10 +1159,10 @@ int dm_pool_create_snap(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_snap(pmd, dev, origin);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1166,10 +1202,10 @@ int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __delete_device(pmd, dev);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1180,7 +1216,7 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
@@ -1194,7 +1230,7 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
r = 0;
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1225,7 +1261,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
* We commit to ensure the btree roots which we increment in a
* moment are up to date.
*/
- __commit_transaction(pmd);
+ r = __commit_transaction(pmd);
+ if (r < 0) {
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+ return r;
+ }
/*
* Copy the superblock.
@@ -1283,10 +1324,10 @@ int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __reserve_metadata_snap(pmd);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1331,10 +1372,10 @@ int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __release_metadata_snap(pmd);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1377,19 +1418,19 @@ int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
if (!pmd->fail_io)
r = __open_device(pmd, dev, 0, td);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
int dm_pool_close_thin_device(struct dm_thin_device *td)
{
- down_write(&td->pmd->root_lock);
+ pmd_write_lock_in_core(td->pmd);
__close_device(td);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return 0;
}
@@ -1570,10 +1611,10 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __insert(td, block, data_block);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1657,10 +1698,10 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove(td, block);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1670,10 +1711,10 @@ int dm_thin_remove_range(struct dm_thin_device *td,
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove_range(td, begin, end);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1696,13 +1737,13 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
{
int r = 0;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
for (; b != e; b++) {
r = dm_sm_inc_block(pmd->data_sm, b);
if (r)
break;
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1711,13 +1752,13 @@ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
{
int r = 0;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
for (; b != e; b++) {
r = dm_sm_dec_block(pmd->data_sm, b);
if (r)
break;
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1765,10 +1806,10 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = dm_sm_new_block(pmd->data_sm, result);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1777,12 +1818,16 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ /*
+	 * Care is taken to ensure that a commit on its own never
+	 * puts the thin-pool in service.
+ */
+ __pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
r = __commit_transaction(pmd);
- if (r <= 0)
+ if (r < 0)
goto out;
/*
@@ -1790,7 +1835,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
*/
r = __begin_transaction(pmd);
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1806,7 +1851,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
@@ -1817,7 +1862,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
pmd->fail_io = true;
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1948,10 +1993,10 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __resize_space_map(pmd->data_sm, new_count);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1960,29 +2005,29 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
if (!r)
__set_metadata_reserve(pmd);
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
dm_bm_set_read_only(pmd->bm);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
}
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
dm_bm_set_read_write(pmd->bm);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
@@ -1992,9 +2037,9 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
{
int r;
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -2005,7 +2050,7 @@ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
r = superblock_lock(pmd, &sblock);
@@ -2019,7 +2064,7 @@ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
dm_bm_unlock(sblock);
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
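The dm-thin-metadata changes above boil down to one pattern: every metadata entry point takes the root lock through a wrapper, only the wrappers that imply real use mark the pool in-service, and commits become no-ops until something does. A minimal userspace sketch of that pattern, using a pthread rwlock in place of the kernel rw_semaphore; the struct layout and commit body are invented for illustration and are not the dm-thin code:

/* Minimal sketch of the pmd_write_lock()/in_service pattern. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool_metadata {
	pthread_rwlock_t root_lock;
	bool in_service;	/* set once a service-implying op runs */
};

/* In-core-only variant: does not mark the pool in-service. */
static void pmd_write_lock_in_core(struct pool_metadata *pmd)
{
	pthread_rwlock_wrlock(&pmd->root_lock);
}

/* Normal variant: the caller implies the pool is in service. */
static void pmd_write_lock(struct pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	pmd->in_service = true;
}

static void pmd_write_unlock(struct pool_metadata *pmd)
{
	pthread_rwlock_unlock(&pmd->root_lock);
}

/* Commit is skipped entirely until something put the pool in service. */
static int commit_transaction(struct pool_metadata *pmd)
{
	if (!pmd->in_service)
		return 0;
	printf("writing metadata...\n");
	return 0;
}

int main(void)
{
	struct pool_metadata pmd = {
		.root_lock = PTHREAD_RWLOCK_INITIALIZER,
		.in_service = false,
	};

	pmd_write_lock_in_core(&pmd);	/* e.g. open/close a thin device */
	pmd_write_unlock(&pmd);
	commit_transaction(&pmd);	/* no-op: pool still idle */

	pmd_write_lock(&pmd);		/* e.g. create a thin device */
	pmd_write_unlock(&pmd);
	commit_transaction(&pmd);	/* now actually commits */
	return 0;
}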
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index f7822875589e..1cb137f0ef9d 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -190,7 +190,6 @@ struct writeback_struct {
struct dm_writecache *wc;
struct wc_entry **wc_list;
unsigned wc_list_n;
- unsigned page_offset;
struct page *page;
struct wc_entry *wc_list_inline[WB_LIST_INLINE];
struct bio bio;
@@ -546,21 +545,20 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
e = container_of(node, struct wc_entry, rb_node);
if (read_original_sector(wc, e) == block)
break;
+
node = (read_original_sector(wc, e) >= block ?
e->rb_node.rb_left : e->rb_node.rb_right);
if (unlikely(!node)) {
- if (!(flags & WFE_RETURN_FOLLOWING)) {
+ if (!(flags & WFE_RETURN_FOLLOWING))
return NULL;
- }
if (read_original_sector(wc, e) >= block) {
- break;
+ return e;
} else {
node = rb_next(&e->rb_node);
- if (unlikely(!node)) {
+ if (unlikely(!node))
return NULL;
- }
e = container_of(node, struct wc_entry, rb_node);
- break;
+ return e;
}
}
}
@@ -571,7 +569,7 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
node = rb_prev(&e->rb_node);
else
node = rb_next(&e->rb_node);
- if (!node)
+ if (unlikely(!node))
return e;
e2 = container_of(node, struct wc_entry, rb_node);
if (read_original_sector(wc, e2) != block)
@@ -804,7 +802,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
writecache_free_entry(wc, e);
}
- if (!node)
+ if (unlikely(!node))
break;
e = container_of(node, struct wc_entry, rb_node);
@@ -1478,10 +1476,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
- wb->bio.bi_end_io = writecache_writeback_endio;
- bio_set_dev(&wb->bio, wc->dev->bdev);
- wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
- wb->page_offset = PAGE_SIZE;
+ bio->bi_end_io = writecache_writeback_endio;
+ bio_set_dev(bio, wc->dev->bdev);
+ bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
GFP_NOIO | __GFP_NORETRY |
@@ -1507,12 +1504,12 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(&wb->bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
- bio_endio(&wb->bio);
+ bio_endio(bio);
} else {
- submit_bio(&wb->bio);
+ submit_bio(bio);
}
__writeback_throttle(wc, wbl);
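The writecache_find_entry() change earlier in this file makes the WFE_RETURN_FOLLOWING case return the next entry directly instead of falling through. The same "exact match or first entry above the key" lookup can be sketched on a plain binary search tree; this is an illustrative simplification with an invented node layout, not the rb-tree walk the driver actually performs:

/*
 * Return the entry matching @block or, when @return_following is set,
 * the entry with the smallest block greater than @block; NULL otherwise.
 */
#include <stddef.h>

struct entry {
	unsigned long block;
	struct entry *left, *right;
};

struct entry *find_entry(struct entry *root, unsigned long block,
			 int return_following)
{
	struct entry *best = NULL;

	while (root) {
		if (root->block == block)
			return root;
		if (root->block > block) {
			best = root;	/* best "following" candidate so far */
			root = root->left;
		} else {
			root = root->right;
		}
	}

	return return_following ? best : NULL;
}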
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index fa68336560c3..d8334cd45d7c 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
goto out;
}
+ if (!nr_blkz)
+ break;
+
/* Process report */
for (i = 0; i < nr_blkz; i++) {
ret = dmz_init_zone(zmd, zone, &blkz[i]);
@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
&blkz, &nr_blkz, GFP_NOIO);
+ if (!nr_blkz)
+ ret = -EIO;
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 8865c1709e16..51d029bbb740 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -643,7 +643,8 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ aligned_capacity = dev->capacity &
+ ~((sector_t)blk_queue_zone_sectors(q) - 1);
if (ti->begin ||
((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
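The cast added above matters because blk_queue_zone_sectors() returns a 32-bit value: building the alignment mask in 32 bits and ANDing it with a 64-bit capacity silently clears the upper half of the capacity. A standalone demonstration with made-up numbers:

/* Why the (sector_t) cast is needed when masking a 64-bit capacity. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t capacity = 6ULL << 30;		/* 6 Gi sectors, > 4 Gi */
	uint32_t zone_sectors = 1 << 19;	/* 256 MiB zones */

	/* Buggy: ~(zone_sectors - 1) is a 32-bit value, so the AND
	 * truncates the capacity to its low 32 bits.
	 */
	uint64_t bad = capacity & ~(zone_sectors - 1);

	/* Fixed: widen before building the mask. */
	uint64_t good = capacity & ~((uint64_t)zone_sectors - 1);

	printf("bad  = 0x%llx\n", (unsigned long long)bad);	/* 0x80000000 */
	printf("good = 0x%llx\n", (unsigned long long)good);	/* 0x180000000 */
	return 0;
}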
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 043f0761e4a0..1fb1333fefec 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -781,7 +781,8 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
- fmode_t mode) {
+ fmode_t mode)
+{
struct table_device *td;
list_for_each_entry(td, l, list)
@@ -792,7 +793,8 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
- struct dm_dev **result) {
+ struct dm_dev **result)
+{
int r;
struct table_device *td;
@@ -1906,7 +1908,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
- struct dax_device *dax_dev = NULL;
struct mapped_device *md;
void *old_md;
@@ -1969,11 +1970,10 @@ static struct mapped_device *alloc_dev(int minor)
sprintf(md->disk->disk_name, "dm-%d", minor);
if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
- dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
- if (!dax_dev)
+ md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+ if (!md->dax_dev)
goto bad;
}
- md->dax_dev = dax_dev;
add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 0a3b8ae4a29c..b8a62188f6be 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -190,6 +190,8 @@ static int sm_find_free(void *addr, unsigned begin, unsigned end,
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
+ memset(ll, 0, sizeof(struct ll_disk));
+
ll->tm = tm;
ll->bitmap_info.tm = tm;
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 7ebd58a1c431..3cf25abf5807 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2201,6 +2201,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
goto unlock;
}
+ /*
+	 * vm_pgoff is treated in the V4L2 API as a 'cookie' to select a buffer,
+	 * not as an in-buffer offset. We always want to mmap a whole buffer
+ * from its beginning.
+ */
+ vma->vm_pgoff = 0;
+
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
unlock:
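Resetting vm_pgoff in vb2_mmap() centralises the "offset is a cookie, not an in-buffer offset" rule, so the per-allocator mmap handlers no longer need to do it themselves (see the videobuf2-dma-contig hunk below). A rough userspace analogy of that flow, with the buffer bookkeeping invented for illustration:

/* The mmap offset selects a buffer and is zeroed before the per-buffer
 * mapper runs, so the whole buffer is mapped from its start.
 */
#include <stddef.h>
#include <stdio.h>

#define NUM_BUFFERS	4
#define BUFFER_PAGES	64	/* pages reserved per buffer cookie */

struct fake_vma {
	size_t pgoff;	/* stands in for vma->vm_pgoff */
};

/* Allocator-level mapper: must always see offset 0. */
static int map_one_buffer(int index, struct fake_vma *vma)
{
	printf("mapping buffer %d from in-buffer page offset %zu\n",
	       index, vma->pgoff);
	return 0;
}

/* Core mmap: decode the cookie, then drop it before mapping. */
static int core_mmap(struct fake_vma *vma)
{
	int index = (int)(vma->pgoff / BUFFER_PAGES);

	if (index >= NUM_BUFFERS)
		return -1;

	vma->pgoff = 0;
	return map_one_buffer(index, vma);
}

int main(void)
{
	struct fake_vma vma = { .pgoff = 2 * BUFFER_PAGES };

	return core_mmap(&vma);	/* maps buffer 2 from its beginning */
}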
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 82389aead6ed..ecbef266130b 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -186,12 +186,6 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- /*
- * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
- * map whole buffer
- */
- vma->vm_pgoff = 0;
-
ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
buf->dma_addr, buf->size, buf->attrs);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 270c3162fdcb..4a4c49d6085c 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -328,28 +328,18 @@ static unsigned int vb2_dma_sg_num_users(void *buf_priv)
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- int i = 0;
+ int err;
if (!buf) {
printk(KERN_ERR "No memory to map\n");
return -EINVAL;
}
- do {
- int ret;
-
- ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
- if (ret) {
- printk(KERN_ERR "Remapping memory, error: %d\n", ret);
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
-
+ err = vm_map_pages(vma, buf->pages, buf->num_pages);
+ if (err) {
+ printk(KERN_ERR "Remapping memory, error: %d\n", err);
+ return err;
+ }
/*
* Use common vm_area operations to track buffer refcount.
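vm_map_pages() replaces the driver's hand-rolled vm_insert_page() loop with a core helper that also checks that the requested mapping fits the buffer. Conceptually it amounts to the sketch below; the vm_pgoff handling of the real helper is omitted, and this is an outline of the idea, not the mm/ implementation:

/* Conceptual outline of mapping an array of kernel pages into a VMA. */
#include <linux/mm.h>

static int vm_map_pages_sketch(struct vm_area_struct *vma,
			       struct page **pages, unsigned long num)
{
	unsigned long count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long addr = vma->vm_start;
	unsigned long i;
	int err;

	if (count > num)
		return -ENXIO;	/* mapping larger than the buffer */

	for (i = 0; i < count; i++) {
		err = vm_insert_page(vma, addr, pages[i]);
		if (err)
			return err;
		addr += PAGE_SIZE;
	}

	return 0;
}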
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index d730693f299c..8f7f8efc71a7 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -37,6 +37,25 @@
#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
#define ISC_PFE_CFG0_BPS_MASK GENMASK(30, 28)
+#define ISC_PFE_CFG0_COLEN BIT(12)
+#define ISC_PFE_CFG0_ROWEN BIT(13)
+
+/* ISC Parallel Front End Configuration 1 Register */
+#define ISC_PFE_CFG1 0x00000010
+
+#define ISC_PFE_CFG1_COLMIN(v) ((v))
+#define ISC_PFE_CFG1_COLMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG1_COLMAX(v) ((v) << 16)
+#define ISC_PFE_CFG1_COLMAX_MASK GENMASK(31, 16)
+
+/* ISC Parallel Front End Configuration 2 Register */
+#define ISC_PFE_CFG2 0x00000014
+
+#define ISC_PFE_CFG2_ROWMIN(v) ((v))
+#define ISC_PFE_CFG2_ROWMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG2_ROWMAX(v) ((v) << 16)
+#define ISC_PFE_CFG2_ROWMAX_MASK GENMASK(31, 16)
+
/* ISC Clock Enable Register */
#define ISC_CLKEN 0x00000018
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index 4bba9da206e4..94cb309fdb52 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -721,6 +721,40 @@ static void isc_start_dma(struct isc_device *isc)
u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
u32 dctrl_dview;
dma_addr_t addr0;
+ u32 h, w;
+
+ h = isc->fmt.fmt.pix.height;
+ w = isc->fmt.fmt.pix.width;
+
+ /*
+	 * If the sensor is not RAW, it outputs each pixel (12-16 bits)
+	 * as two samples on the ISC data bus (which is 8-12 bits wide).
+	 * The ISC counts samples, so these values must be multiplied
+	 * by two to get the real number of samples for the required pixels.
+ */
+ if (!ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) {
+ h <<= 1;
+ w <<= 1;
+ }
+
+ /*
+	 * We limit the column/row count that the ISC will output
+	 * according to the configured resolution.
+	 * This avoids the situation where a misconfigured sensor sends
+	 * more data than expected and the ISC simply DMAs it to memory,
+	 * causing corruption.
+ */
+ regmap_write(regmap, ISC_PFE_CFG1,
+ (ISC_PFE_CFG1_COLMIN(0) & ISC_PFE_CFG1_COLMIN_MASK) |
+ (ISC_PFE_CFG1_COLMAX(w - 1) & ISC_PFE_CFG1_COLMAX_MASK));
+
+ regmap_write(regmap, ISC_PFE_CFG2,
+ (ISC_PFE_CFG2_ROWMIN(0) & ISC_PFE_CFG2_ROWMIN_MASK) |
+ (ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK));
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
regmap_write(regmap, ISC_DAD0, addr0);
@@ -1965,6 +1999,8 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
+ INIT_WORK(&isc->awb_work, isc_awb_work);
+
ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
@@ -2018,8 +2054,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- INIT_WORK(&isc->awb_work, isc_awb_work);
-
/* Register video device */
strscpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
vdev->release = video_device_release_empty;
@@ -2135,8 +2169,11 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
break;
}
- subdev_entity->asd = devm_kzalloc(dev,
- sizeof(*subdev_entity->asd), GFP_KERNEL);
+ /* asd will be freed by the subsystem once it's added to the
+ * notifier list
+ */
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
+ GFP_KERNEL);
if (!subdev_entity->asd) {
of_node_put(rem);
ret = -ENOMEM;
@@ -2284,6 +2321,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->asd);
if (ret) {
fwnode_handle_put(subdev_entity->asd->match.fwnode);
+ kfree(subdev_entity->asd);
goto cleanup_subdev;
}
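The switch from devm_kzalloc() to kzalloc() above follows the ownership rule stated in the new comment: the notifier frees the asd once it has been added, so the caller must free it only when registration fails (as the probe hunk now does) and must not use a devm allocation that would free it behind the subsystem's back. A standalone illustration of that rule, with the registry invented for illustration:

/* Allocate, hand ownership to a registry on success, free only on failure. */
#include <stdlib.h>

struct async_subdev {
	int dummy;
};

/* Pretend registry: takes ownership of @asd when it returns 0. */
static int notifier_add_subdev(struct async_subdev *asd)
{
	(void)asd;
	return 0;	/* or a negative errno on failure */
}

int register_sensor(void)
{
	struct async_subdev *asd = calloc(1, sizeof(*asd));
	int ret;

	if (!asd)
		return -12;	/* -ENOMEM */

	ret = notifier_add_subdev(asd);
	if (ret) {
		/* Registration failed: ownership never transferred. */
		free(asd);
		return ret;
	}

	/* On success the registry frees asd later; do not free it here. */
	return 0;
}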
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 3ce58dee4422..1d96cca61547 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1515,10 +1515,20 @@ static int coda_queue_setup(struct vb2_queue *vq,
static int coda_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "%s field isn't supported\n", __func__);
+ return -EINVAL;
+ }
+ }
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
v4l2_warn(&ctx->dev->v4l2_dev,
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 8339163a5231..4e24f5d781f4 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -104,7 +104,7 @@ static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
struct v4l2_output *output)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
- int temp_index = output->index;
+ unsigned int temp_index = output->index;
if (temp_index >= cfg->num_outputs)
return -EINVAL;
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 37f0d7146dfa..cb6a9e3946b6 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1527,23 +1527,20 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
unsigned long size;
struct videobuf_buffer *vb;
- vb = q->bufs[b->index];
-
if (!vout->streaming)
return -EINVAL;
- if (file->f_flags & O_NONBLOCK)
- /* Call videobuf_dqbuf for non blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
- else
- /* Call videobuf_dqbuf for blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
addr = (unsigned long) vout->buf_phy_addr[vb->i];
size = (unsigned long) vb->size;
dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
size, DMA_TO_DEVICE);
- return ret;
+ return 0;
}
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 799e526fd3df..8f097e514900 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -68,6 +68,7 @@ struct rcar_csi2;
/* Field Detection Control */
#define FLD_REG 0x1c
#define FLD_FLD_NUM(n) (((n) & 0xff) << 16)
+#define FLD_DET_SEL(n) (((n) & 0x3) << 4)
#define FLD_FLD_EN4 BIT(3)
#define FLD_FLD_EN3 BIT(2)
#define FLD_FLD_EN2 BIT(1)
@@ -84,6 +85,9 @@ struct rcar_csi2;
/* Interrupt Enable */
#define INTEN_REG 0x30
+#define INTEN_INT_AFIFO_OF BIT(27)
+#define INTEN_INT_ERRSOTHS BIT(4)
+#define INTEN_INT_ERRSOTSYNCHS BIT(3)
/* Interrupt Source Mask */
#define INTCLOSE_REG 0x34
@@ -475,7 +479,7 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
static int rcsi2_start_receiver(struct rcar_csi2 *priv)
{
const struct rcar_csi2_format *format;
- u32 phycnt, vcdt = 0, vcdt2 = 0;
+ u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
unsigned int i;
int mbps, ret;
@@ -507,6 +511,16 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
vcdt2 |= vcdt_part << ((i % 2) * 16);
}
+ if (priv->mf.field == V4L2_FIELD_ALTERNATE) {
+ fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2
+ | FLD_FLD_EN;
+
+ if (priv->mf.height == 240)
+ fld |= FLD_FLD_NUM(0);
+ else
+ fld |= FLD_FLD_NUM(1);
+ }
+
phycnt = PHYCNT_ENABLECLK;
phycnt |= (1 << priv->lanes) - 1;
@@ -514,6 +528,10 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
if (mbps < 0)
return mbps;
+ /* Enable interrupts. */
+ rcsi2_write(priv, INTEN_REG, INTEN_INT_AFIFO_OF | INTEN_INT_ERRSOTHS
+ | INTEN_INT_ERRSOTSYNCHS);
+
/* Init */
rcsi2_write(priv, TREF_REG, TREF_TREF);
rcsi2_write(priv, PHTC_REG, 0);
@@ -549,8 +567,7 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
- rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
- FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
+ rcsi2_write(priv, FLD_REG, fld);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
@@ -675,6 +692,43 @@ static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
.pad = &rcar_csi2_pad_ops,
};
+static irqreturn_t rcsi2_irq(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+ u32 status, err_status;
+
+ status = rcsi2_read(priv, INTSTATE_REG);
+ err_status = rcsi2_read(priv, INTERRSTATE_REG);
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTSTATE_REG, status);
+
+ if (!err_status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTERRSTATE_REG, err_status);
+
+ dev_info(priv->dev, "Transfer error, restarting CSI-2 receiver\n");
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rcsi2_irq_thread(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+
+ mutex_lock(&priv->lock);
+ rcsi2_stop(priv);
+ usleep_range(1000, 2000);
+ if (rcsi2_start(priv))
+ dev_warn(priv->dev, "Failed to restart CSI-2 receiver\n");
+ mutex_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
/* -----------------------------------------------------------------------------
* Async handling and registration of subdevices and links.
*/
@@ -947,7 +1001,7 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
struct platform_device *pdev)
{
struct resource *res;
- int irq;
+ int irq, ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
@@ -958,6 +1012,12 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
if (irq < 0)
return irq;
+ ret = devm_request_threaded_irq(&pdev->dev, irq, rcsi2_irq,
+ rcsi2_irq_thread, IRQF_SHARED,
+ KBUILD_MODNAME, priv);
+ if (ret)
+ return ret;
+
priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return PTR_ERR(priv->rstc);
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 7fb3a4fa07c1..447bdfbe5afe 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -334,8 +334,8 @@ static int tegra_cec_probe(struct platform_device *pdev)
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
- if (!hdmi_dev)
- return -ENODEV;
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
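cec_notifier_parse_hdmi_phandle() reports failure through the kernel's ERR_PTR convention rather than by returning NULL, so the probe must check IS_ERR() and propagate PTR_ERR() as above (which also lets -EPROBE_DEFER propagate). A minimal userspace re-implementation of the idiom, with the parser stubbed out for illustration:

/* The ERR_PTR/IS_ERR/PTR_ERR idiom: encode an errno in the pointer. */
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for cec_notifier_parse_hdmi_phandle(): may defer probing. */
static void *parse_hdmi_phandle(int have_phandle)
{
	static int hdmi_dev;	/* stand-in device object */

	if (!have_phandle)
		return ERR_PTR(-517);	/* -EPROBE_DEFER */
	return &hdmi_dev;
}

int main(void)
{
	void *hdmi = parse_hdmi_phandle(0);

	if (IS_ERR(hdmi)) {
		/* Propagate the encoded error, e.g. to defer the probe. */
		printf("probe returns %ld\n", PTR_ERR(hdmi));
		return 1;
	}
	return 0;
}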
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 08929c087e27..870a2a526e0b 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -186,12 +186,12 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
- flags, dma->pages, NULL);
+ err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ flags | FOLL_LONGTERM, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+ dprintk(1, "get_user_pages: err=%d [%d]\n", err,
dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index c3748b414c27..0322df9dc249 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
struct atmel_ebi_dev_config {
int cs;
@@ -36,6 +37,7 @@ struct atmel_ebi_dev {
struct atmel_ebi_caps {
unsigned int available_cs;
unsigned int ebi_csa_offs;
+ const char *regmap_name;
void (*get_config)(struct atmel_ebi_dev *ebid,
struct atmel_ebi_dev_config *conf);
int (*xlate_config)(struct atmel_ebi_dev *ebid,
@@ -47,7 +49,7 @@ struct atmel_ebi_caps {
struct atmel_ebi {
struct clk *clk;
- struct regmap *matrix;
+ struct regmap *regmap;
struct {
struct regmap *regmap;
struct clk *clk;
@@ -357,7 +359,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
* one "atmel,smc-" property is present.
*/
if (ebi->caps->ebi_csa_offs && apply)
- regmap_update_bits(ebi->matrix,
+ regmap_update_bits(ebi->regmap,
ebi->caps->ebi_csa_offs,
BIT(cs), 0);
@@ -372,6 +374,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
static const struct atmel_ebi_caps at91sam9260_ebi_caps = {
.available_cs = 0xff,
.ebi_csa_offs = AT91SAM9260_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -380,6 +383,7 @@ static const struct atmel_ebi_caps at91sam9260_ebi_caps = {
static const struct atmel_ebi_caps at91sam9261_ebi_caps = {
.available_cs = 0xff,
.ebi_csa_offs = AT91SAM9261_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -388,6 +392,7 @@ static const struct atmel_ebi_caps at91sam9261_ebi_caps = {
static const struct atmel_ebi_caps at91sam9263_ebi0_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9263_MATRIX_EBI0CSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -396,6 +401,7 @@ static const struct atmel_ebi_caps at91sam9263_ebi0_caps = {
static const struct atmel_ebi_caps at91sam9263_ebi1_caps = {
.available_cs = 0x7,
.ebi_csa_offs = AT91SAM9263_MATRIX_EBI1CSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -404,6 +410,7 @@ static const struct atmel_ebi_caps at91sam9263_ebi1_caps = {
static const struct atmel_ebi_caps at91sam9rl_ebi_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9RL_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -412,6 +419,7 @@ static const struct atmel_ebi_caps at91sam9rl_ebi_caps = {
static const struct atmel_ebi_caps at91sam9g45_ebi_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9G45_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -420,6 +428,7 @@ static const struct atmel_ebi_caps at91sam9g45_ebi_caps = {
static const struct atmel_ebi_caps at91sam9x5_ebi_caps = {
.available_cs = 0x3f,
.ebi_csa_offs = AT91SAM9X5_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
.get_config = at91sam9_ebi_get_config,
.xlate_config = atmel_ebi_xslate_smc_config,
.apply_config = at91sam9_ebi_apply_config,
@@ -432,6 +441,15 @@ static const struct atmel_ebi_caps sama5d3_ebi_caps = {
.apply_config = sama5_ebi_apply_config,
};
+static const struct atmel_ebi_caps sam9x60_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91_SFR_CCFG_EBICSA,
+ .regmap_name = "microchip,sfr",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
static const struct of_device_id atmel_ebi_id_table[] = {
{
.compatible = "atmel,at91sam9260-ebi",
@@ -465,6 +483,10 @@ static const struct of_device_id atmel_ebi_id_table[] = {
.compatible = "atmel,sama5d3-ebi",
.data = &sama5d3_ebi_caps,
},
+ {
+ .compatible = "microchip,sam9x60-ebi",
+ .data = &sam9x60_ebi_caps,
+ },
{ /* sentinel */ }
};
@@ -543,13 +565,14 @@ static int atmel_ebi_probe(struct platform_device *pdev)
/*
* The sama5d3 does not provide an EBICSA register and thus does not need
- * to access the matrix registers.
+ * to access it.
*/
if (ebi->caps->ebi_csa_offs) {
- ebi->matrix =
- syscon_regmap_lookup_by_phandle(np, "atmel,matrix");
- if (IS_ERR(ebi->matrix))
- return PTR_ERR(ebi->matrix);
+ ebi->regmap =
+ syscon_regmap_lookup_by_phandle(np,
+ ebi->caps->regmap_name);
+ if (IS_ERR(ebi->regmap))
+ return PTR_ERR(ebi->regmap);
}
ret = of_property_read_u32(np, "#address-cells", &val);
diff --git a/drivers/memory/emif.h b/drivers/memory/emif.h
index 9e9f8037955d..6b71fadb3cfa 100644
--- a/drivers/memory/emif.h
+++ b/drivers/memory/emif.h
@@ -537,6 +537,9 @@
#define MCONNID_SHIFT 0
#define MCONNID_MASK (0xff << 0)
+/* READ_WRITE_LEVELING_CONTROL */
+#define RDWRLVLFULL_START 0x80000000
+
/* DDR_PHY_CTRL_1 - EMIF4D */
#define DLL_SLAVE_DLY_CTRL_SHIFT_4D 4
#define DLL_SLAVE_DLY_CTRL_MASK_4D (0xFF << 4)
@@ -598,6 +601,7 @@ extern struct emif_regs_amx3 ti_emif_regs_amx3;
void ti_emif_save_context(void);
void ti_emif_restore_context(void);
+void ti_emif_run_hw_leveling(void);
void ti_emif_enter_sr(void);
void ti_emif_exit_sr(void);
void ti_emif_abort_sr(void);
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 0a53598d982f..163b6c69e651 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -51,6 +51,9 @@
#define MC_EMEM_ADR_CFG 0x54
#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
+#define MC_TIMING_CONTROL 0xfc
+#define MC_TIMING_UPDATE BIT(0)
+
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
@@ -74,7 +77,7 @@ static const struct of_device_id tegra_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
-static int terga_mc_block_dma_common(struct tegra_mc *mc,
+static int tegra_mc_block_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -90,13 +93,13 @@ static int terga_mc_block_dma_common(struct tegra_mc *mc,
return 0;
}
-static bool terga_mc_dma_idling_common(struct tegra_mc *mc,
+static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
}
-static int terga_mc_unblock_dma_common(struct tegra_mc *mc,
+static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -112,17 +115,17 @@ static int terga_mc_unblock_dma_common(struct tegra_mc *mc,
return 0;
}
-static int terga_mc_reset_status_common(struct tegra_mc *mc,
+static int tegra_mc_reset_status_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
}
-const struct tegra_mc_reset_ops terga_mc_reset_ops_common = {
- .block_dma = terga_mc_block_dma_common,
- .dma_idling = terga_mc_dma_idling_common,
- .unblock_dma = terga_mc_unblock_dma_common,
- .reset_status = terga_mc_reset_status_common,
+const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
+ .block_dma = tegra_mc_block_dma_common,
+ .dma_idling = tegra_mc_dma_idling_common,
+ .unblock_dma = tegra_mc_unblock_dma_common,
+ .reset_status = tegra_mc_reset_status_common,
};
static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
@@ -282,25 +285,28 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
u32 value;
/* compute the number of MC clock cycles per tick */
- tick = mc->tick * clk_get_rate(mc->clk);
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
do_div(tick, NSEC_PER_SEC);
- value = readl(mc->regs + MC_EMEM_ARB_CFG);
+ value = mc_readl(mc, MC_EMEM_ARB_CFG);
value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
- writel(value, mc->regs + MC_EMEM_ARB_CFG);
+ mc_writel(mc, value, MC_EMEM_ARB_CFG);
/* write latency allowance defaults */
for (i = 0; i < mc->soc->num_clients; i++) {
const struct tegra_mc_la *la = &mc->soc->clients[i].la;
u32 value;
- value = readl(mc->regs + la->reg);
+ value = mc_readl(mc, la->reg);
value &= ~(la->mask << la->shift);
value |= (la->def & la->mask) << la->shift;
- writel(value, mc->regs + la->reg);
+ mc_writel(mc, value, la->reg);
}
+ /* latch new values */
+ mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+
return 0;
}
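The (unsigned long long) cast in the latency-allowance hunk above is the key fix: on the 32-bit Tegra SoCs both mc->tick and the clk_get_rate() return value are effectively 32-bit, so without widening one operand the product wraps before it ever reaches the 64-bit variable. A standalone demonstration with plausible numbers:

/* Why one operand must be widened before a 32x32 multiply. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tick_ns = 30;			/* MC tick in nanoseconds */
	uint32_t clk_rate = 400000000U;		/* 400 MHz clock rate */

	uint64_t bad = tick_ns * clk_rate;	/* multiplied in 32 bits: wraps */
	uint64_t good = (uint64_t)tick_ns * clk_rate;

	printf("bad  = %llu\n", (unsigned long long)bad);	/* 3410065408 */
	printf("good = %llu\n", (unsigned long long)good);	/* 12000000000 */
	return 0;
}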
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 887a3b07334f..392993955c93 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -35,7 +35,7 @@ static inline void mc_writel(struct tegra_mc *mc, u32 value,
writel_relaxed(value, mc->regs + offset);
}
-extern const struct tegra_mc_reset_ops terga_mc_reset_ops_common;
+extern const struct tegra_mc_reset_ops tegra_mc_reset_ops_common;
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
extern const struct tegra_mc_soc tegra20_mc_soc;
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 6560a5101322..62305fafd641 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -572,7 +572,7 @@ static const struct tegra_mc_client tegra114_mc_clients[] = {
},
}, {
.id = 0x34,
- .name = "fdcwr2",
+ .name = "fdcdwr2",
.swgroup = TEGRA_SWGROUP_NV,
.smmu = {
.reg = 0x22c,
@@ -975,7 +975,7 @@ const struct tegra_mc_soc tegra114_mc_soc = {
.smmu = &tegra114_smmu_soc,
.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra114_mc_resets,
.num_resets = ARRAY_SIZE(tegra114_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index eedb7d48e2ea..772716ab6b23 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -20,6 +20,7 @@
#include <linux/clkdev.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index b561a1fe7f46..8f8487bda642 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1074,7 +1074,7 @@ const struct tegra_mc_soc tegra124_mc_soc = {
.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
};
@@ -1104,7 +1104,7 @@ const struct tegra_mc_soc tegra132_mc_soc = {
.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index 7119e532471c..121237b16add 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -198,7 +198,7 @@ static const struct tegra_mc_reset tegra20_mc_resets[] = {
TEGRA20_MC_RESET(VI, 0x100, 0x178, 0x104, 14),
};
-static int terga20_mc_hotreset_assert(struct tegra_mc *mc,
+static int tegra20_mc_hotreset_assert(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -214,7 +214,7 @@ static int terga20_mc_hotreset_assert(struct tegra_mc *mc,
return 0;
}
-static int terga20_mc_hotreset_deassert(struct tegra_mc *mc,
+static int tegra20_mc_hotreset_deassert(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -230,7 +230,7 @@ static int terga20_mc_hotreset_deassert(struct tegra_mc *mc,
return 0;
}
-static int terga20_mc_block_dma(struct tegra_mc *mc,
+static int tegra20_mc_block_dma(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -246,19 +246,19 @@ static int terga20_mc_block_dma(struct tegra_mc *mc,
return 0;
}
-static bool terga20_mc_dma_idling(struct tegra_mc *mc,
+static bool tegra20_mc_dma_idling(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return mc_readl(mc, rst->status) == 0;
}
-static int terga20_mc_reset_status(struct tegra_mc *mc,
+static int tegra20_mc_reset_status(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return (mc_readl(mc, rst->reset) & BIT(rst->bit)) == 0;
}
-static int terga20_mc_unblock_dma(struct tegra_mc *mc,
+static int tegra20_mc_unblock_dma(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -274,13 +274,13 @@ static int terga20_mc_unblock_dma(struct tegra_mc *mc,
return 0;
}
-const struct tegra_mc_reset_ops terga20_mc_reset_ops = {
- .hotreset_assert = terga20_mc_hotreset_assert,
- .hotreset_deassert = terga20_mc_hotreset_deassert,
- .block_dma = terga20_mc_block_dma,
- .dma_idling = terga20_mc_dma_idling,
- .unblock_dma = terga20_mc_unblock_dma,
- .reset_status = terga20_mc_reset_status,
+static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
+ .hotreset_assert = tegra20_mc_hotreset_assert,
+ .hotreset_deassert = tegra20_mc_hotreset_deassert,
+ .block_dma = tegra20_mc_block_dma,
+ .dma_idling = tegra20_mc_dma_idling,
+ .unblock_dma = tegra20_mc_unblock_dma,
+ .reset_status = tegra20_mc_reset_status,
};
const struct tegra_mc_soc tegra20_mc_soc = {
@@ -290,7 +290,7 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.client_id_mask = 0x3f,
.intmask = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE |
MC_INT_DECERR_EMEM,
- .reset_ops = &terga20_mc_reset_ops,
+ .reset_ops = &tegra20_mc_reset_ops,
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index d00a77160407..aa22cda637eb 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -1132,7 +1132,7 @@ const struct tegra_mc_soc tegra210_mc_soc = {
.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra210_mc_resets,
.num_resets = ARRAY_SIZE(tegra210_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index bee5314ed404..c9af0f682ead 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -726,7 +726,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
},
}, {
.id = 0x34,
- .name = "fdcwr2",
+ .name = "fdcdwr2",
.swgroup = TEGRA_SWGROUP_NV2,
.smmu = {
.reg = 0x22c,
@@ -999,7 +999,7 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.smmu = &tegra30_smmu_soc,
.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra30_mc_resets,
.num_resets = ARRAY_SIZE(tegra30_mc_resets),
};
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 2250d03ea17f..ab07aa163138 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -138,6 +138,9 @@ static int ti_emif_alloc_sram(struct device *dev,
emif_data->pm_functions.exit_sr =
sram_resume_address(emif_data,
(unsigned long)ti_emif_exit_sr);
+ emif_data->pm_functions.run_hw_leveling =
+ sram_resume_address(emif_data,
+ (unsigned long)ti_emif_run_hw_leveling);
emif_data->pm_data.regs_virt =
(struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
diff --git a/drivers/memory/ti-emif-sram-pm.S b/drivers/memory/ti-emif-sram-pm.S
index a5369181e5c2..d75ae18efa7d 100644
--- a/drivers/memory/ti-emif-sram-pm.S
+++ b/drivers/memory/ti-emif-sram-pm.S
@@ -27,6 +27,7 @@
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK 0x0700
#define EMIF_SDCFG_TYPE_DDR2 0x2 << SDRAM_TYPE_SHIFT
+#define EMIF_SDCFG_TYPE_DDR3 0x3 << SDRAM_TYPE_SHIFT
#define EMIF_STATUS_READY 0x4
#define AM43XX_EMIF_PHY_CTRL_REG_COUNT 0x120
@@ -245,6 +246,46 @@ emif_skip_restore_extra_regs:
ENDPROC(ti_emif_restore_context)
/*
+ * void ti_emif_run_hw_leveling(void)
+ *
+ * Used during resume to run hardware leveling again and restore the
+ * configuration of the EMIF PHY, only for DDR3.
+ */
+ENTRY(ti_emif_run_hw_leveling)
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
+
+ ldr r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+ orr r3, r3, #RDWRLVLFULL_START
+ ldr r2, [r0, #EMIF_SDRAM_CONFIG]
+ and r2, r2, #SDRAM_TYPE_MASK
+ cmp r2, #EMIF_SDCFG_TYPE_DDR3
+ bne skip_hwlvl
+
+ str r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+
+ /*
+	 * If EMIF registers are touched during the initial stage of the HW
+	 * leveling sequence, an L3 NOC timeout error is issued because the
+	 * EMIF does not respond. This is not fatal, but it is avoidable.
+	 * This small wait loop gives the condition enough time to clear,
+	 * even in the worst case of the CPU running at its max speed of 1 GHz.
+ */
+ mov r2, #0x2000
+1:
+ subs r2, r2, #0x1
+ bne 1b
+
+ /* Bit clears when operation is complete */
+2: ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+ tst r1, #RDWRLVLFULL_START
+ bne 2b
+
+skip_hwlvl:
+ mov pc, lr
+ENDPROC(ti_emif_run_hw_leveling)
+
+/*
* void ti_emif_enter_sr(void)
*
* Programs the EMIF to tell the SDRAM to enter into self-refresh
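For readability, the new ti_emif_run_hw_leveling() routine above can be restated in C as below. The real code must execute from SRAM without a C runtime, which is why it is written in assembly; the register offsets and bit masks are the ones used in the patch and are assumed to be visible through the driver's emif headers, and the udelay() is an approximation of the fixed wait loop:

/* C restatement of the hardware-leveling resume step, for illustration. */
#include <linux/delay.h>
#include <linux/io.h>

static void emif_run_hw_leveling(void __iomem *base)
{
	u32 cfg = readl(base + EMIF_SDRAM_CONFIG);
	u32 lvl;

	/* Hardware leveling is only run for DDR3. */
	if ((cfg & SDRAM_TYPE_MASK) != EMIF_SDCFG_TYPE_DDR3)
		return;

	lvl = readl(base + EMIF_READ_WRITE_LEVELING_CONTROL);
	writel(lvl | RDWRLVLFULL_START,
	       base + EMIF_READ_WRITE_LEVELING_CONTROL);

	/*
	 * Leave the EMIF alone while leveling starts (the assembly uses a
	 * fixed 0x2000-iteration loop), then wait for the start bit to
	 * self-clear when the operation completes.
	 */
	udelay(10);
	while (readl(base + EMIF_READ_WRITE_LEVELING_CONTROL) &
	       RDWRLVLFULL_START)
		;
}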
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 26ad6468d13a..294d9567cc71 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -16,7 +16,7 @@ config MFD_CS5535
depends on PCI && (X86_32 || (X86 && COMPILE_TEST))
---help---
This is the core driver for CS5535/CS5536 MFD functions. This is
- necessary for using the board's GPIO and MFGPT functionality.
+ necessary for using the board's GPIO and MFGPT functionality.
config MFD_ALTERA_A10SR
bool "Altera Arria10 DevKit System Resource chip"
@@ -29,6 +29,16 @@ config MFD_ALTERA_A10SR
accessing the external gpio extender (LEDs & buttons) and
power supply alarms (hwmon).
+config MFD_ALTERA_SYSMGR
+ bool "Altera SOCFPGA System Manager"
+ depends on (ARCH_SOCFPGA || ARCH_STRATIX10) && OF
+ select MFD_SYSCON
+ help
+	  Select this to get System Manager support for all Altera-branded
+	  SOCFPGAs. The System Manager driver uses regmap_mmio accesses
+	  for ARM32 parts and SMC calls to EL3 for ARM64 parts.
+
config MFD_ACT8945A
tristate "Active-semi ACT8945A"
select MFD_CORE
@@ -213,13 +223,13 @@ config MFD_CROS_EC
protocol for talking to the EC is defined by the bus driver.
config MFD_CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ ---help---
+ This driver adds support to talk with the ChromeOS EC from userspace.
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
config MFD_MADERA
tristate "Cirrus Logic Madera codecs"
@@ -733,6 +743,20 @@ config MFD_MAX77620
provides common support for accessing the device; additional drivers
must be enabled in order to use the functionality of the device.
+config MFD_MAX77650
+ tristate "Maxim MAX77650/77651 PMIC Support"
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say Y here to add support for Maxim Semiconductor MAX77650 and
+ MAX77651 Power Management ICs. This is the core multifunction
+ driver for interacting with the device. The module name is
+ 'max77650'. Additional drivers can be enabled in order to use
+ the following functionalities of the device: GPIO, regulator,
+ charger, LED, onkey.
+
config MFD_MAX77686
tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
depends on I2C
@@ -867,7 +891,7 @@ config MFD_CPCAP
At least Motorola Droid 4 is known to use CPCAP.
config MFD_VIPERBOARD
- tristate "Nano River Technologies Viperboard"
+ tristate "Nano River Technologies Viperboard"
select MFD_CORE
depends on USB
default n
@@ -903,15 +927,15 @@ config PCF50633_ADC
tristate "NXP PCF50633 ADC"
depends on MFD_PCF50633
help
- Say yes here if you want to include support for ADC in the
- NXP PCF50633 chip.
+ Say yes here if you want to include support for ADC in the
+ NXP PCF50633 chip.
config PCF50633_GPIO
tristate "NXP PCF50633 GPIO"
depends on MFD_PCF50633
help
- Say yes here if you want to include support GPIO for pins on
- the PCF50633 chip.
+ Say yes here if you want to include support GPIO for pins on
+ the PCF50633 chip.
config UCB1400_CORE
tristate "Philips UCB1400 Core driver"
@@ -1026,7 +1050,7 @@ config MFD_RN5T618
select REGMAP_I2C
help
Say yes here to add support for the Ricoh RN5T567,
- RN5T618, RC5T619 PMIC.
+ RN5T618, RC5T619 PMIC.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -1079,9 +1103,9 @@ config MFD_SM501_GPIO
bool "Export GPIO via GPIO layer"
depends on MFD_SM501 && GPIOLIB
---help---
- This option uses the gpio library layer to export the 64 GPIO
- lines on the SM501. The platform data is used to supply the
- base number for the first GPIO line to register.
+ This option uses the gpio library layer to export the 64 GPIO
+ lines on the SM501. The platform data is used to supply the
+ base number for the first GPIO line to register.
config MFD_SKY81452
tristate "Skyworks Solutions SKY81452"
@@ -1096,16 +1120,16 @@ config MFD_SKY81452
will be called sky81452.
config MFD_SMSC
- bool "SMSC ECE1099 series chips"
- depends on I2C=y
- select MFD_CORE
- select REGMAP_I2C
- help
- If you say yes here you get support for the
- ece1099 chips from SMSC.
+ bool "SMSC ECE1099 series chips"
+ depends on I2C=y
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the
+ ece1099 chips from SMSC.
- To compile this driver as a module, choose M here: the
- module will be called smsc.
+ To compile this driver as a module, choose M here: the
+ module will be called smsc.
config MFD_SC27XX_PMIC
tristate "Spreadtrum SC27xx PMICs"
@@ -1171,12 +1195,12 @@ config AB8500_CORE
This chip embeds various other multimedia funtionalities as well.
config AB8500_DEBUG
- bool "Enable debug info via debugfs"
- depends on AB8500_GPADC && DEBUG_FS
- default y if DEBUG_FS
- help
- Select this option if you want debug information using the debug
- filesystem, debugfs.
+ bool "Enable debug info via debugfs"
+ depends on AB8500_GPADC && DEBUG_FS
+ default y if DEBUG_FS
+ help
+ Select this option if you want debug information using the debug
+ filesystem, debugfs.
config AB8500_GPADC
bool "ST-Ericsson AB8500 GPADC driver"
@@ -1907,6 +1931,19 @@ config MFD_STPMIC1
To compile this driver as a module, choose M here: the
module will be called stpmic1.
+config MFD_STMFX
+ tristate "Support for STMicroelectronics Multi-Function eXpander (STMFX)"
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the STMicroelectronics Multi-Function eXpander.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b4569ed7f3f3..52b1a90ff515 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -155,6 +155,7 @@ obj-$(CONFIG_MFD_DA9150) += da9150-core.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
obj-$(CONFIG_MFD_MAX77620) += max77620.o
+obj-$(CONFIG_MFD_MAX77650) += max77650.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
obj-$(CONFIG_MFD_MAX77843) += max77843.o
@@ -237,6 +238,7 @@ obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
+obj-$(CONFIG_MFD_ALTERA_SYSMGR) += altera-sysmgr.o
obj-$(CONFIG_MFD_STPMIC1) += stpmic1.o
obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
@@ -246,4 +248,4 @@ obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
-
+obj-$(CONFIG_MFD_STMFX) += stmfx.o
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 8d652b2f9d14..f70d3f6a959b 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -2587,7 +2587,7 @@ static ssize_t ab8500_unsubscribe_write(struct file *file,
}
/*
- * - several deubgfs nodes fops
+ * - several debugfs nodes fops
*/
static const struct file_operations ab8500_bank_fops = {
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
new file mode 100644
index 000000000000..8976f82785bb
--- /dev/null
+++ b/drivers/mfd/altera-sysmgr.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation.
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Based on syscon driver.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/altera-sysmgr.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/**
+ * struct altr_sysmgr - Altera SOCFPGA System Manager
+ * @regmap: the regmap used for System Manager accesses.
+ * @base: the base address for the System Manager
+ */
+struct altr_sysmgr {
+ struct regmap *regmap;
+ resource_size_t *base;
+};
+
+static struct platform_driver altr_sysmgr_driver;
+
+/**
+ * s10_protected_reg_write
+ * Write to a protected SMC register.
+ * @base: Base address of System Manager
+ * @reg: Address offset of register
+ * @val: Value to write
+ * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
+ * INTEL_SIP_SMC_REG_ERROR on error
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
+ */
+static int s10_protected_reg_write(void *base,
+ unsigned int reg, unsigned int val)
+{
+ struct arm_smccc_res result;
+ unsigned long sysmgr_base = (unsigned long)base;
+
+ arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, sysmgr_base + reg,
+ val, 0, 0, 0, 0, 0, &result);
+
+ return (int)result.a0;
+}
+
+/**
+ * s10_protected_reg_read
+ * Read the status of a protected SMC register
+ * @base: Base address of System Manager.
+ * @reg: Address of register
+ * @val: Value read.
+ * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
+ * INTEL_SIP_SMC_REG_ERROR on error
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
+ */
+static int s10_protected_reg_read(void *base,
+ unsigned int reg, unsigned int *val)
+{
+ struct arm_smccc_res result;
+ unsigned long sysmgr_base = (unsigned long)base;
+
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, sysmgr_base + reg,
+ 0, 0, 0, 0, 0, 0, &result);
+
+ *val = (unsigned int)result.a1;
+
+ return (int)result.a0;
+}
+
+static struct regmap_config altr_sysmgr_regmap_cfg = {
+ .name = "altr_sysmgr",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+/**
+ * sysmgr_match_phandle
+ * Matching function used by driver_find_device().
+ * Return: True if match is found, otherwise false.
+ */
+static int sysmgr_match_phandle(struct device *dev, void *data)
+{
+ return dev->of_node == (struct device_node *)data;
+}
+
+/**
+ * altr_sysmgr_regmap_lookup_by_phandle
+ * Find the sysmgr previous configured in probe() and return regmap property.
+ * Return: regmap if found or error if not found.
+ */
+struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device *dev;
+ struct altr_sysmgr *sysmgr;
+ struct device_node *sysmgr_np;
+
+ if (property)
+ sysmgr_np = of_parse_phandle(np, property, 0);
+ else
+ sysmgr_np = np;
+
+ if (!sysmgr_np)
+ return ERR_PTR(-ENODEV);
+
+ dev = driver_find_device(&altr_sysmgr_driver.driver, NULL,
+ (void *)sysmgr_np, sysmgr_match_phandle);
+ of_node_put(sysmgr_np);
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ sysmgr = dev_get_drvdata(dev);
+
+ return sysmgr->regmap;
+}
+EXPORT_SYMBOL_GPL(altr_sysmgr_regmap_lookup_by_phandle);
+
+static int sysmgr_probe(struct platform_device *pdev)
+{
+ struct altr_sysmgr *sysmgr;
+ struct regmap *regmap;
+ struct resource *res;
+ struct regmap_config sysmgr_config = altr_sysmgr_regmap_cfg;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ sysmgr = devm_kzalloc(dev, sizeof(*sysmgr), GFP_KERNEL);
+ if (!sysmgr)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ sysmgr_config.max_register = resource_size(res) -
+ sysmgr_config.reg_stride;
+ if (of_device_is_compatible(np, "altr,sys-mgr-s10")) {
+ /* Need the physical address for the SMC call */
+ sysmgr->base = (resource_size_t *)res->start;
+ sysmgr_config.reg_read = s10_protected_reg_read;
+ sysmgr_config.reg_write = s10_protected_reg_write;
+
+ regmap = devm_regmap_init(dev, NULL, sysmgr->base,
+ &sysmgr_config);
+ } else {
+ sysmgr->base = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!sysmgr->base)
+ return -ENOMEM;
+
+ sysmgr_config.max_register = res->end - res->start - 3;
+ regmap = devm_regmap_init_mmio(dev, sysmgr->base,
+ &sysmgr_config);
+ }
+
+ if (IS_ERR(regmap)) {
+ pr_err("regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ sysmgr->regmap = regmap;
+
+ platform_set_drvdata(pdev, sysmgr);
+
+ return 0;
+}
+
+static const struct of_device_id altr_sysmgr_of_match[] = {
+ { .compatible = "altr,sys-mgr" },
+ { .compatible = "altr,sys-mgr-s10" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_sysmgr_of_match);
+
+static struct platform_driver altr_sysmgr_driver = {
+ .probe = sysmgr_probe,
+ .driver = {
+ .name = "altr,system_manager",
+ .of_match_table = altr_sysmgr_of_match,
+ },
+};
+
+static int __init altr_sysmgr_init(void)
+{
+ return platform_driver_register(&altr_sysmgr_driver);
+}
+core_initcall(altr_sysmgr_init);
+
+static void __exit altr_sysmgr_exit(void)
+{
+ platform_driver_unregister(&altr_sysmgr_driver);
+}
+module_exit(altr_sysmgr_exit);
+
+MODULE_AUTHOR("Thor Thayer <>");
+MODULE_DESCRIPTION("SOCFPGA System Manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index e82543bcfdc8..35a9e16f9902 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -141,6 +141,7 @@ static const struct of_device_id atmel_hlcdc_match[] = {
{ .compatible = "atmel,sama5d2-hlcdc" },
{ .compatible = "atmel,sama5d3-hlcdc" },
{ .compatible = "atmel,sama5d4-hlcdc" },
+ { .compatible = "microchip,sam9x60-hlcdc" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_match);
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index a7b7c5423ea5..c2e8a0dee7f8 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -65,6 +65,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
{ .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
{ .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
+ { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
{ .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ },
};
@@ -75,6 +76,7 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp202", 0 },
{ "axp209", 0 },
{ "axp221", 0 },
+ { "axp223", 0 },
{ "axp806", 0 },
{ },
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 3c97f2c0fdfe..2215660dfa05 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -198,6 +198,12 @@ static const struct resource axp22x_usb_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
};
+/* AXP803 and AXP813/AXP818 share the same interrupts */
+static const struct resource axp803_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP803_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP803_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+};
+
static const struct resource axp22x_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
@@ -741,6 +747,11 @@ static const struct mfd_cell axp803_cells[] = {
.of_compatible = "x-powers,axp813-ac-power-supply",
.num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
.resources = axp20x_ac_power_supply_resources,
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp803_usb_power_supply_resources),
+ .resources = axp803_usb_power_supply_resources,
+ .of_compatible = "x-powers,axp813-usb-power-supply",
},
{ .name = "axp20x-regulator" },
};
@@ -793,6 +804,11 @@ static const struct mfd_cell axp813_cells[] = {
.of_compatible = "x-powers,axp813-ac-power-supply",
.num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
.resources = axp20x_ac_power_supply_resources,
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp803_usb_power_supply_resources),
+ .resources = axp803_usb_power_supply_resources,
+ .of_compatible = "x-powers,axp813-usb-power-supply",
},
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 6acfe036d522..bd2bcdd4718b 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -75,20 +75,49 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
{
+ int ret;
struct {
struct cros_ec_command msg;
- struct ec_params_host_sleep_event req;
+ union {
+ struct ec_params_host_sleep_event req0;
+ struct ec_params_host_sleep_event_v1 req1;
+ struct ec_response_host_sleep_event_v1 resp1;
+ } u;
} __packed buf;
memset(&buf, 0, sizeof(buf));
- buf.req.sleep_event = sleep_event;
+ if (ec_dev->host_sleep_v1) {
+ buf.u.req1.sleep_event = sleep_event;
+ buf.u.req1.suspend_params.sleep_timeout_ms =
+ EC_HOST_SLEEP_TIMEOUT_DEFAULT;
+
+ buf.msg.outsize = sizeof(buf.u.req1);
+ if ((sleep_event == HOST_SLEEP_EVENT_S3_RESUME) ||
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME))
+ buf.msg.insize = sizeof(buf.u.resp1);
+
+ buf.msg.version = 1;
+
+ } else {
+ buf.u.req0.sleep_event = sleep_event;
+ buf.msg.outsize = sizeof(buf.u.req0);
+ }
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
- buf.msg.version = 0;
- buf.msg.outsize = sizeof(buf.req);
- return cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+
+ /* For now, report failure to transition to S0ix with a warning. */
+ if (ret >= 0 && ec_dev->host_sleep_v1 &&
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME))
+ WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
+ EC_HOST_RESUME_SLEEP_TIMEOUT,
+ "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+ buf.u.resp1.resume_response.sleep_transitions &
+ EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
+
+ return ret;
}
int cros_ec_register(struct cros_ec_device *ec_dev)
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index d275deaecb12..54a58df571b6 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -385,7 +385,8 @@ static const struct mfd_cell cros_ec_rtc_cells[] = {
};
static const struct mfd_cell cros_usbpd_charger_cells[] = {
- { .name = "cros-usbpd-charger" }
+ { .name = "cros-usbpd-charger" },
+ { .name = "cros-usbpd-logger" },
};
static const struct mfd_cell cros_ec_platform_cells[] = {
@@ -418,6 +419,39 @@ static int ec_device_probe(struct platform_device *pdev)
device_initialize(&ec->class_dev);
cdev_init(&ec->cdev, &fops);
+ /* Check whether this is actually a Fingerprint MCU rather than an EC */
+ if (cros_ec_check_features(ec, EC_FEATURE_FINGERPRINT)) {
+ dev_info(dev, "CrOS Fingerprint MCU detected.\n");
+ /*
+ * Help userspace differentiate ECs from the FP MCU,
+ * regardless of the probing order.
+ */
+ ec_platform->ec_name = CROS_EC_DEV_FP_NAME;
+ }
+
+ /*
+ * Check whether this is actually an Integrated Sensor Hub (ISH)
+ * rather than an EC.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_ISH)) {
+ dev_info(dev, "CrOS ISH MCU detected.\n");
+ /*
+ * Help userspace differentiate ECs from the ISH MCU,
+ * regardless of the probing order.
+ */
+ ec_platform->ec_name = CROS_EC_DEV_ISH_NAME;
+ }
+
+ /* Check whether this is actually a Touchpad MCU rather than an EC */
+ if (cros_ec_check_features(ec, EC_FEATURE_TOUCHPAD)) {
+ dev_info(dev, "CrOS Touchpad MCU detected.\n");
+ /*
+ * Help userspace differentiate ECs from the TP MCU,
+ * regardless of the probing order.
+ */
+ ec_platform->ec_name = CROS_EC_DEV_TP_NAME;
+ }
+
/*
* Add the class device
* Link to the character device for creating the /dev entry
diff --git a/drivers/mfd/cs47l35-tables.c b/drivers/mfd/cs47l35-tables.c
index 604c9dd14df5..338b825127f1 100644
--- a/drivers/mfd/cs47l35-tables.c
+++ b/drivers/mfd/cs47l35-tables.c
@@ -178,6 +178,7 @@ static const struct reg_default cs47l35_reg_default[] = {
{ 0x00000448, 0x0a83 }, /* R1096 (0x448) - eDRE Enable */
{ 0x0000044a, 0x0000 }, /* R1098 (0x44a) - eDRE Manual */
{ 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000451, 0x0000 }, /* R1105 (0x451) - DAC AEC Control 2 */
{ 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
@@ -970,6 +971,7 @@ static bool cs47l35_16bit_readable_register(struct device *dev,
case MADERA_EDRE_ENABLE:
case MADERA_EDRE_MANUAL:
case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_DAC_AEC_CONTROL_2:
case MADERA_NOISE_GATE_CONTROL:
case MADERA_PDM_SPK1_CTRL_1:
case MADERA_PDM_SPK1_CTRL_2:
diff --git a/drivers/mfd/cs47l90-tables.c b/drivers/mfd/cs47l90-tables.c
index 77207d98f0cc..c040d3d7232a 100644
--- a/drivers/mfd/cs47l90-tables.c
+++ b/drivers/mfd/cs47l90-tables.c
@@ -263,6 +263,7 @@ static const struct reg_default cs47l90_reg_default[] = {
{ 0x00000440, 0x003f }, /* R1088 (0x440) - DRE Enable */
{ 0x00000448, 0x003f }, /* R1096 (0x448) - eDRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000451, 0x0000 }, /* R1105 (0x451) - DAC AEC Control 2 */
{ 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
@@ -1692,6 +1693,7 @@ static bool cs47l90_16bit_readable_register(struct device *dev,
case MADERA_DRE_ENABLE:
case MADERA_EDRE_ENABLE:
case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_DAC_AEC_CONTROL_2:
case MADERA_NOISE_GATE_CONTROL:
case MADERA_PDM_SPK1_CTRL_1:
case MADERA_PDM_SPK1_CTRL_2:
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index 6e4ce49b4405..b125f90dd375 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * da9063-core.c: Device access for Dialog DA9063 modules
+ * Device access for Dialog DA9063 modules
*
* Copyright 2012 Dialog Semiconductors Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
@@ -7,11 +8,6 @@
* Author: Krystian Garbaciak, Dialog Semiconductor
* Author: Michal Hajduk, Dialog Semiconductor
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -26,7 +22,6 @@
#include <linux/regmap.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/proc_fs.h>
@@ -165,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063)
int da9063_device_init(struct da9063 *da9063, unsigned int irq)
{
- struct da9063_pdata *pdata = da9063->dev->platform_data;
int model, variant_id, variant_code;
int ret;
@@ -173,24 +167,10 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
if (ret < 0)
dev_err(da9063->dev, "Cannot clear fault log\n");
- if (pdata) {
- da9063->flags = pdata->flags;
- da9063->irq_base = pdata->irq_base;
- } else {
- da9063->flags = 0;
- da9063->irq_base = -1;
- }
+ da9063->flags = 0;
+ da9063->irq_base = -1;
da9063->chip_irq = irq;
- if (pdata && pdata->init != NULL) {
- ret = pdata->init(da9063);
- if (ret != 0) {
- dev_err(da9063->dev,
- "Platform initialization failed.\n");
- return ret;
- }
- }
-
ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model);
if (ret < 0) {
dev_err(da9063->dev, "Cannot read chip model id.\n");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 50a24b1921d0..455de74c0dd2 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -1,15 +1,10 @@
-/* da9063-i2c.c: Interrupt support for Dialog DA9063
+// SPDX-License-Identifier: GPL-2.0+
+/* I2C support for Dialog DA9063
*
* Copyright 2012 Dialog Semiconductor Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -22,7 +17,6 @@
#include <linux/mfd/core.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/of.h>
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index ecc0c8ce6c58..e2bbedf58e68 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -1,15 +1,10 @@
-/* da9063-irq.c: Interrupts support for Dialog DA9063
+// SPDX-License-Identifier: GPL-2.0+
+/* Interrupt support for Dialog DA9063
*
* Copyright 2012 Dialog Semiconductor Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
* Author: Michal Hajduk, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -19,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#define DA9063_REG_EVENT_A_OFFSET 0
#define DA9063_REG_EVENT_B_OFFSET 1
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index cba2eb166650..6b111be944d9 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -129,6 +129,19 @@ static const struct intel_lpss_platform_info cnl_i2c_info = {
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
+ /* CML */
+ { PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02c5), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02c6), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02c7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x02e8), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02e9), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 45221e092ecf..fc6aa4c50144 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -273,6 +274,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+ /* Set the device in reset state */
+ writel(0, lpss->priv + LPSS_PRIV_RESETS);
+
intel_lpss_deassert_reset(lpss);
intel_lpss_set_remap_addr(lpss);
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 5bddb84cfc1f..11adbf77960d 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -74,16 +74,6 @@ static const struct dmi_system_id dmi_platform_info[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-0YA2"),
- },
- .driver_data = (void *)400000,
- },
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)400000,
},
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 64a3aece9c5e..be84bb2aa837 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -60,6 +60,7 @@ static struct mfd_cell cht_wc_dev[] = {
.resources = cht_wc_ext_charger_resources,
},
{ .name = "cht_wcove_region", },
+ { .name = "cht_wcove_leds", },
};
/*
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index d8ddd1a6f304..436361ce3737 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -37,6 +37,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+static struct max77620_chip *max77620_scratch;
+
static const struct resource gpio_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO),
};
@@ -111,6 +113,26 @@ static const struct mfd_cell max20024_children[] = {
},
};
+static const struct mfd_cell max77663_children[] = {
+ { .name = "max77620-pinctrl", },
+ { .name = "max77620-clock", },
+ { .name = "max77663-pmic", },
+ { .name = "max77620-watchdog", },
+ {
+ .name = "max77620-gpio",
+ .resources = gpio_resources,
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ }, {
+ .name = "max77620-rtc",
+ .resources = rtc_resources,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ }, {
+ .name = "max77663-power",
+ .resources = power_resources,
+ .num_resources = ARRAY_SIZE(power_resources),
+ },
+};
+
static const struct regmap_range max77620_readable_ranges[] = {
regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
};
@@ -171,6 +193,35 @@ static const struct regmap_config max20024_regmap_config = {
.volatile_table = &max77620_volatile_table,
};
+static const struct regmap_range max77663_readable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_CID5),
+};
+
+static const struct regmap_access_table max77663_readable_table = {
+ .yes_ranges = max77663_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77663_readable_ranges),
+};
+
+static const struct regmap_range max77663_writable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_CID5),
+};
+
+static const struct regmap_access_table max77663_writable_table = {
+ .yes_ranges = max77663_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77663_writable_ranges),
+};
+
+static const struct regmap_config max77663_regmap_config = {
+ .name = "power-slave",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77620_REG_CID5 + 1,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &max77663_readable_table,
+ .wr_table = &max77663_writable_table,
+ .volatile_table = &max77620_volatile_table,
+};
+
/*
* MAX77620 and MAX20024 has the following steps of the interrupt handling
* for TOP interrupts:
@@ -240,6 +291,9 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
case MAX77620:
fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
break;
+ case MAX77663:
+ fps_min_period = MAX20024_FPS_PERIOD_MIN_US;
+ break;
default:
return -EINVAL;
}
@@ -274,6 +328,9 @@ static int max77620_config_fps(struct max77620_chip *chip,
case MAX77620:
fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
break;
+ case MAX77663:
+ fps_max_period = MAX20024_FPS_PERIOD_MAX_US;
+ break;
default:
return -EINVAL;
}
@@ -375,6 +432,9 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
}
skip_fps:
+ if (chip->chip_id == MAX77663)
+ return 0;
+
/* Enable wake on EN0 pin */
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
MAX77620_ONOFFCNFG2_WK_EN0,
@@ -423,6 +483,15 @@ static int max77620_read_es_version(struct max77620_chip *chip)
return ret;
}
+static void max77620_pm_power_off(void)
+{
+ struct max77620_chip *chip = max77620_scratch;
+
+ regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
+ MAX77620_ONOFFCNFG1_SFT_RST,
+ MAX77620_ONOFFCNFG1_SFT_RST);
+}
+
static int max77620_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -430,6 +499,7 @@ static int max77620_probe(struct i2c_client *client,
struct max77620_chip *chip;
const struct mfd_cell *mfd_cells;
int n_mfd_cells;
+ bool pm_off;
int ret;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
@@ -453,6 +523,11 @@ static int max77620_probe(struct i2c_client *client,
n_mfd_cells = ARRAY_SIZE(max20024_children);
rmap_config = &max20024_regmap_config;
break;
+ case MAX77663:
+ mfd_cells = max77663_children;
+ n_mfd_cells = ARRAY_SIZE(max77663_children);
+ rmap_config = &max77663_regmap_config;
+ break;
default:
dev_err(chip->dev, "ChipID is invalid %d\n", chip->chip_id);
return -EINVAL;
@@ -491,6 +566,12 @@ static int max77620_probe(struct i2c_client *client,
return ret;
}
+ pm_off = of_device_is_system_power_controller(client->dev.of_node);
+ if (pm_off && !pm_power_off) {
+ max77620_scratch = chip;
+ pm_power_off = max77620_pm_power_off;
+ }
+
return 0;
}
@@ -546,6 +627,9 @@ static int max77620_i2c_suspend(struct device *dev)
return ret;
}
+ if (chip->chip_id == MAX77663)
+ goto out;
+
/* Disable WK_EN0 */
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
MAX77620_ONOFFCNFG2_WK_EN0, 0);
@@ -581,7 +665,7 @@ static int max77620_i2c_resume(struct device *dev)
* For MAX20024: No need to configure WKEN0 on resume as
* it is configured on Init.
*/
- if (chip->chip_id == MAX20024)
+ if (chip->chip_id == MAX20024 || chip->chip_id == MAX77663)
goto out;
/* Enable WK_EN0 */
@@ -603,6 +687,7 @@ out:
static const struct i2c_device_id max77620_id[] = {
{"max77620", MAX77620},
{"max20024", MAX20024},
+ {"max77663", MAX77663},
{},
};
diff --git a/drivers/mfd/max77650.c b/drivers/mfd/max77650.c
new file mode 100644
index 000000000000..60e07aca6ae5
--- /dev/null
+++ b/drivers/mfd/max77650.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// Core MFD driver for MAXIM 77650/77651 charger/power-supply.
+// Programming manual: https://pdfserv.maximintegrated.com/en/an/AN6428.pdf
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define MAX77650_INT_GPI_F_MSK BIT(0)
+#define MAX77650_INT_GPI_R_MSK BIT(1)
+#define MAX77650_INT_GPI_MSK \
+ (MAX77650_INT_GPI_F_MSK | MAX77650_INT_GPI_R_MSK)
+#define MAX77650_INT_nEN_F_MSK BIT(2)
+#define MAX77650_INT_nEN_R_MSK BIT(3)
+#define MAX77650_INT_TJAL1_R_MSK BIT(4)
+#define MAX77650_INT_TJAL2_R_MSK BIT(5)
+#define MAX77650_INT_DOD_R_MSK BIT(6)
+
+#define MAX77650_INT_THM_MSK BIT(0)
+#define MAX77650_INT_CHG_MSK BIT(1)
+#define MAX77650_INT_CHGIN_MSK BIT(2)
+#define MAX77650_INT_TJ_REG_MSK BIT(3)
+#define MAX77650_INT_CHGIN_CTRL_MSK BIT(4)
+#define MAX77650_INT_SYS_CTRL_MSK BIT(5)
+#define MAX77650_INT_SYS_CNFG_MSK BIT(6)
+
+#define MAX77650_INT_GLBL_OFFSET 0
+#define MAX77650_INT_CHG_OFFSET 1
+
+#define MAX77650_SBIA_LPM_MASK BIT(5)
+#define MAX77650_SBIA_LPM_DISABLED 0x00
+
+enum {
+ MAX77650_INT_GPI,
+ MAX77650_INT_nEN_F,
+ MAX77650_INT_nEN_R,
+ MAX77650_INT_TJAL1_R,
+ MAX77650_INT_TJAL2_R,
+ MAX77650_INT_DOD_R,
+ MAX77650_INT_THM,
+ MAX77650_INT_CHG,
+ MAX77650_INT_CHGIN,
+ MAX77650_INT_TJ_REG,
+ MAX77650_INT_CHGIN_CTRL,
+ MAX77650_INT_SYS_CTRL,
+ MAX77650_INT_SYS_CNFG,
+};
+
+static const struct resource max77650_charger_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_CHG, "CHG"),
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_CHGIN, "CHGIN"),
+};
+
+static const struct resource max77650_gpio_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_GPI, "GPI"),
+};
+
+static const struct resource max77650_onkey_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_nEN_F, "nEN_F"),
+ DEFINE_RES_IRQ_NAMED(MAX77650_INT_nEN_R, "nEN_R"),
+};
+
+static const struct mfd_cell max77650_cells[] = {
+ {
+ .name = "max77650-regulator",
+ .of_compatible = "maxim,max77650-regulator",
+ }, {
+ .name = "max77650-charger",
+ .of_compatible = "maxim,max77650-charger",
+ .resources = max77650_charger_resources,
+ .num_resources = ARRAY_SIZE(max77650_charger_resources),
+ }, {
+ .name = "max77650-gpio",
+ .of_compatible = "maxim,max77650-gpio",
+ .resources = max77650_gpio_resources,
+ .num_resources = ARRAY_SIZE(max77650_gpio_resources),
+ }, {
+ .name = "max77650-led",
+ .of_compatible = "maxim,max77650-led",
+ }, {
+ .name = "max77650-onkey",
+ .of_compatible = "maxim,max77650-onkey",
+ .resources = max77650_onkey_resources,
+ .num_resources = ARRAY_SIZE(max77650_onkey_resources),
+ },
+};
+
+static const struct regmap_irq max77650_irqs[] = {
+ [MAX77650_INT_GPI] = {
+ .reg_offset = MAX77650_INT_GLBL_OFFSET,
+ .mask = MAX77650_INT_GPI_MSK,
+ .type = {
+ .type_falling_val = MAX77650_INT_GPI_F_MSK,
+ .type_rising_val = MAX77650_INT_GPI_R_MSK,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
+ },
+ REGMAP_IRQ_REG(MAX77650_INT_nEN_F,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_nEN_F_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_nEN_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_nEN_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_TJAL1_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_TJAL1_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_TJAL2_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_TJAL2_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_DOD_R,
+ MAX77650_INT_GLBL_OFFSET, MAX77650_INT_DOD_R_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_THM,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_THM_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_CHG,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_CHG_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_CHGIN,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_CHGIN_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_TJ_REG,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_TJ_REG_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_CHGIN_CTRL,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_CHGIN_CTRL_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_SYS_CTRL,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_SYS_CTRL_MSK),
+ REGMAP_IRQ_REG(MAX77650_INT_SYS_CNFG,
+ MAX77650_INT_CHG_OFFSET, MAX77650_INT_SYS_CNFG_MSK),
+};
+
+static const struct regmap_irq_chip max77650_irq_chip = {
+ .name = "max77650-irq",
+ .irqs = max77650_irqs,
+ .num_irqs = ARRAY_SIZE(max77650_irqs),
+ .num_regs = 2,
+ .status_base = MAX77650_REG_INT_GLBL,
+ .mask_base = MAX77650_REG_INTM_GLBL,
+ .type_in_mask = true,
+ .type_invert = true,
+ .init_ack_masked = true,
+ .clear_on_unmask = true,
+};
+
+static const struct regmap_config max77650_regmap_config = {
+ .name = "max77650",
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max77650_i2c_probe(struct i2c_client *i2c)
+{
+ struct regmap_irq_chip_data *irq_data;
+ struct device *dev = &i2c->dev;
+ struct irq_domain *domain;
+ struct regmap *map;
+ unsigned int val;
+ int rv, id;
+
+ map = devm_regmap_init_i2c(i2c, &max77650_regmap_config);
+ if (IS_ERR(map)) {
+ dev_err(dev, "Unable to initialise I2C Regmap\n");
+ return PTR_ERR(map);
+ }
+
+ rv = regmap_read(map, MAX77650_REG_CID, &val);
+ if (rv) {
+ dev_err(dev, "Unable to read Chip ID\n");
+ return rv;
+ }
+
+ id = MAX77650_CID_BITS(val);
+ switch (id) {
+ case MAX77650_CID_77650A:
+ case MAX77650_CID_77650C:
+ case MAX77650_CID_77651A:
+ case MAX77650_CID_77651B:
+ break;
+ default:
+ dev_err(dev, "Chip not supported - ID: 0x%02x\n", id);
+ return -ENODEV;
+ }
+
+ /*
+ * This IC has a low-power mode which reduces the quiescent current
+ * consumption to ~5.6uA but is only suitable for systems consuming
+ * less than ~2mA. Since that is unlikely to be the case even on
+ * Linux-based wearables, keep the chip in normal power mode.
+ */
+ rv = regmap_update_bits(map,
+ MAX77650_REG_CNFG_GLBL,
+ MAX77650_SBIA_LPM_MASK,
+ MAX77650_SBIA_LPM_DISABLED);
+ if (rv) {
+ dev_err(dev, "Unable to change the power mode\n");
+ return rv;
+ }
+
+ rv = devm_regmap_add_irq_chip(dev, map, i2c->irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77650_irq_chip, &irq_data);
+ if (rv) {
+ dev_err(dev, "Unable to add Regmap IRQ chip\n");
+ return rv;
+ }
+
+ domain = regmap_irq_get_domain(irq_data);
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ max77650_cells, ARRAY_SIZE(max77650_cells),
+ NULL, 0, domain);
+}
+
+static const struct of_device_id max77650_of_match[] = {
+ { .compatible = "maxim,max77650" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_of_match);
+
+static struct i2c_driver max77650_i2c_driver = {
+ .driver = {
+ .name = "max77650",
+ .of_match_table = of_match_ptr(max77650_of_match),
+ },
+ .probe_new = max77650_i2c_probe,
+};
+module_i2c_driver(max77650_i2c_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 multi-function core driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 94e3f32ce935..1ade4c8cc91f 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -269,6 +269,19 @@ fail_alloc:
return ret;
}
+/**
+ * mfd_add_devices - register child devices
+ *
+ * @parent: Pointer to parent device.
+ * @id: Can be PLATFORM_DEVID_AUTO to let the platform core take care
+ * of device numbering, or it will be added to each cell's cell_id.
+ * @cells: Array of (struct mfd_cell)s describing child devices.
+ * @n_devs: Number of child devices to register.
+ * @mem_base: Parent register range resource for child devices.
+ * @irq_base: Base of the range of virtual interrupt numbers allocated for
+ * this MFD device. Unused if @domain is specified.
+ * @domain: Interrupt domain to create mappings for hardware interrupts.
+ */
int mfd_add_devices(struct device *parent, int id,
const struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
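
To complement the kernel-doc added above, here is a minimal, hedged sketch of a caller; the cell names and the parent probe function are placeholders, not taken from this patch.

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

/* Cell names below are placeholders for illustration only. */
static const struct mfd_cell example_cells[] = {
	{ .name = "example-gpio" },
	{ .name = "example-rtc" },
};

static int example_parent_probe(struct platform_device *pdev)
{
	/*
	 * No shared memory window or IRQ domain in this sketch, so mem_base
	 * is NULL, irq_base is 0 and domain is NULL; PLATFORM_DEVID_AUTO
	 * lets the platform core number the child devices.
	 */
	return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
			       example_cells, ARRAY_SIZE(example_cells),
			       NULL, 0, NULL);
}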
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 216fbf6adec9..94377782d208 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -568,14 +568,6 @@ static int rk808_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id rk808_ids[] = {
- { "rk805" },
- { "rk808" },
- { "rk818" },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, rk808_ids);
-
static struct i2c_driver rk808_i2c_driver = {
.driver = {
.name = "rk808",
@@ -583,7 +575,6 @@ static struct i2c_driver rk808_i2c_driver = {
},
.probe = rk808_probe,
.remove = rk808_remove,
- .id_table = rk808_ids,
};
module_i2c_driver(rk808_i2c_driver);
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 521319086c81..95473ff9bb4b 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -28,45 +28,33 @@
#include <linux/regmap.h>
static const struct mfd_cell s5m8751_devs[] = {
- {
- .name = "s5m8751-pmic",
- }, {
- .name = "s5m-charger",
- }, {
- .name = "s5m8751-codec",
- },
+ { .name = "s5m8751-pmic", },
+ { .name = "s5m-charger", },
+ { .name = "s5m8751-codec", },
};
static const struct mfd_cell s5m8763_devs[] = {
- {
- .name = "s5m8763-pmic",
- }, {
- .name = "s5m-rtc",
- }, {
- .name = "s5m-charger",
- },
+ { .name = "s5m8763-pmic", },
+ { .name = "s5m-rtc", },
+ { .name = "s5m-charger", },
};
static const struct mfd_cell s5m8767_devs[] = {
+ { .name = "s5m8767-pmic", },
+ { .name = "s5m-rtc", },
{
- .name = "s5m8767-pmic",
- }, {
- .name = "s5m-rtc",
- }, {
.name = "s5m8767-clk",
.of_compatible = "samsung,s5m8767-clk",
- }
+ },
};
static const struct mfd_cell s2mps11_devs[] = {
+ { .name = "s2mps11-regulator", },
+ { .name = "s2mps14-rtc", },
{
- .name = "s2mps11-regulator",
- }, {
- .name = "s2mps14-rtc",
- }, {
.name = "s2mps11-clk",
.of_compatible = "samsung,s2mps11-clk",
- }
+ },
};
static const struct mfd_cell s2mps13_devs[] = {
@@ -79,37 +67,30 @@ static const struct mfd_cell s2mps13_devs[] = {
};
static const struct mfd_cell s2mps14_devs[] = {
+ { .name = "s2mps14-regulator", },
+ { .name = "s2mps14-rtc", },
{
- .name = "s2mps14-regulator",
- }, {
- .name = "s2mps14-rtc",
- }, {
.name = "s2mps14-clk",
.of_compatible = "samsung,s2mps14-clk",
- }
+ },
};
static const struct mfd_cell s2mps15_devs[] = {
+ { .name = "s2mps15-regulator", },
+ { .name = "s2mps15-rtc", },
{
- .name = "s2mps15-regulator",
- }, {
- .name = "s2mps15-rtc",
- }, {
.name = "s2mps13-clk",
.of_compatible = "samsung,s2mps13-clk",
},
};
static const struct mfd_cell s2mpa01_devs[] = {
- {
- .name = "s2mpa01-pmic",
- },
+ { .name = "s2mpa01-pmic", },
+ { .name = "s2mps14-rtc", },
};
static const struct mfd_cell s2mpu02_devs[] = {
- {
- .name = "s2mpu02-regulator",
- },
+ { .name = "s2mpu02-regulator", },
};
#ifdef CONFIG_OF
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index ad0099077e7e..a98c5d165039 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -455,6 +455,9 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
case S5M8767X:
sec_irq_chip = &s5m8767_irq_chip;
break;
+ case S2MPA01:
+ sec_irq_chip = &s2mps14_irq_chip;
+ break;
case S2MPS11X:
sec_irq_chip = &s2mps11_irq_chip;
break;
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 36b96fee4ce6..0ae27cd30268 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -80,8 +80,6 @@ struct ssbi {
int (*write)(struct ssbi *, u16 addr, const u8 *buf, int len);
};
-#define to_ssbi(dev) platform_get_drvdata(to_platform_device(dev))
-
static inline u32 ssbi_readl(struct ssbi *ssbi, u32 reg)
{
return readl(ssbi->base + reg);
@@ -243,7 +241,7 @@ err:
int ssbi_read(struct device *dev, u16 addr, u8 *buf, int len)
{
- struct ssbi *ssbi = to_ssbi(dev);
+ struct ssbi *ssbi = dev_get_drvdata(dev);
unsigned long flags;
int ret;
@@ -257,7 +255,7 @@ EXPORT_SYMBOL_GPL(ssbi_read);
int ssbi_write(struct device *dev, u16 addr, const u8 *buf, int len)
{
- struct ssbi *ssbi = to_ssbi(dev);
+ struct ssbi *ssbi = dev_get_drvdata(dev);
unsigned long flags;
int ret;
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
new file mode 100644
index 000000000000..fe8efba2d45f
--- /dev/null
+++ b/drivers/mfd/stmfx.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STMicroelectronics Multi-Function eXpander (STMFX) core
+ *
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stmfx.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+static bool stmfx_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STMFX_REG_SYS_CTRL:
+ case STMFX_REG_IRQ_SRC_EN:
+ case STMFX_REG_IRQ_PENDING:
+ case STMFX_REG_IRQ_GPI_PENDING1:
+ case STMFX_REG_IRQ_GPI_PENDING2:
+ case STMFX_REG_IRQ_GPI_PENDING3:
+ case STMFX_REG_GPIO_STATE1:
+ case STMFX_REG_GPIO_STATE2:
+ case STMFX_REG_GPIO_STATE3:
+ case STMFX_REG_IRQ_GPI_SRC1:
+ case STMFX_REG_IRQ_GPI_SRC2:
+ case STMFX_REG_IRQ_GPI_SRC3:
+ case STMFX_REG_GPO_SET1:
+ case STMFX_REG_GPO_SET2:
+ case STMFX_REG_GPO_SET3:
+ case STMFX_REG_GPO_CLR1:
+ case STMFX_REG_GPO_CLR2:
+ case STMFX_REG_GPO_CLR3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stmfx_reg_writeable(struct device *dev, unsigned int reg)
+{
+ return (reg >= STMFX_REG_SYS_CTRL);
+}
+
+static const struct regmap_config stmfx_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .max_register = STMFX_REG_MAX,
+ .volatile_reg = stmfx_reg_volatile,
+ .writeable_reg = stmfx_reg_writeable,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct resource stmfx_pinctrl_resources[] = {
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_GPIO),
+};
+
+static const struct resource stmfx_idd_resources[] = {
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_IDD),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_ERROR),
+};
+
+static const struct resource stmfx_ts_resources[] = {
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_DET),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_NE),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_TH),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_FULL),
+ DEFINE_RES_IRQ(STMFX_REG_IRQ_SRC_EN_TS_OVF),
+};
+
+static struct mfd_cell stmfx_cells[] = {
+ {
+ .of_compatible = "st,stmfx-0300-pinctrl",
+ .name = "stmfx-pinctrl",
+ .resources = stmfx_pinctrl_resources,
+ .num_resources = ARRAY_SIZE(stmfx_pinctrl_resources),
+ },
+ {
+ .of_compatible = "st,stmfx-0300-idd",
+ .name = "stmfx-idd",
+ .resources = stmfx_idd_resources,
+ .num_resources = ARRAY_SIZE(stmfx_idd_resources),
+ },
+ {
+ .of_compatible = "st,stmfx-0300-ts",
+ .name = "stmfx-ts",
+ .resources = stmfx_ts_resources,
+ .num_resources = ARRAY_SIZE(stmfx_ts_resources),
+ },
+};
+
+static u8 stmfx_func_to_mask(u32 func)
+{
+ u8 mask = 0;
+
+ if (func & STMFX_FUNC_GPIO)
+ mask |= STMFX_REG_SYS_CTRL_GPIO_EN;
+
+ if ((func & STMFX_FUNC_ALTGPIO_LOW) || (func & STMFX_FUNC_ALTGPIO_HIGH))
+ mask |= STMFX_REG_SYS_CTRL_ALTGPIO_EN;
+
+ if (func & STMFX_FUNC_TS)
+ mask |= STMFX_REG_SYS_CTRL_TS_EN;
+
+ if (func & STMFX_FUNC_IDD)
+ mask |= STMFX_REG_SYS_CTRL_IDD_EN;
+
+ return mask;
+}
+
+int stmfx_function_enable(struct stmfx *stmfx, u32 func)
+{
+ u32 sys_ctrl;
+ u8 mask;
+ int ret;
+
+ ret = regmap_read(stmfx->map, STMFX_REG_SYS_CTRL, &sys_ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * IDD and TS have priority in the STMFX firmware, so if IDD or TS is
+ * enabled, the firmware disables the ALTGPIO function and the number
+ * of available aGPIOs decreases. To avoid disturbing GPIO management,
+ * abort the IDD or TS function enable in this case.
+ */
+ if (((func & STMFX_FUNC_IDD) || (func & STMFX_FUNC_TS)) &&
+ (sys_ctrl & STMFX_REG_SYS_CTRL_ALTGPIO_EN)) {
+ dev_err(stmfx->dev, "ALTGPIO function already enabled\n");
+ return -EBUSY;
+ }
+
+ /* If TS is enabled, aGPIO[3:0] cannot be used */
+ if ((func & STMFX_FUNC_ALTGPIO_LOW) &&
+ (sys_ctrl & STMFX_REG_SYS_CTRL_TS_EN)) {
+ dev_err(stmfx->dev, "TS in use, aGPIO[3:0] unavailable\n");
+ return -EBUSY;
+ }
+
+ /* If IDD is enabled, aGPIO[7:4] cannot be used */
+ if ((func & STMFX_FUNC_ALTGPIO_HIGH) &&
+ (sys_ctrl & STMFX_REG_SYS_CTRL_IDD_EN)) {
+ dev_err(stmfx->dev, "IDD in use, aGPIO[7:4] unavailable\n");
+ return -EBUSY;
+ }
+
+ mask = stmfx_func_to_mask(func);
+
+ return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, mask);
+}
+EXPORT_SYMBOL_GPL(stmfx_function_enable);
+
+int stmfx_function_disable(struct stmfx *stmfx, u32 func)
+{
+ u8 mask = stmfx_func_to_mask(func);
+
+ return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, 0);
+}
+EXPORT_SYMBOL_GPL(stmfx_function_disable);
+
+static void stmfx_irq_bus_lock(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&stmfx->lock);
+}
+
+static void stmfx_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, stmfx->irq_src);
+
+ mutex_unlock(&stmfx->lock);
+}
+
+static void stmfx_irq_mask(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ stmfx->irq_src &= ~BIT(data->hwirq % 8);
+}
+
+static void stmfx_irq_unmask(struct irq_data *data)
+{
+ struct stmfx *stmfx = irq_data_get_irq_chip_data(data);
+
+ stmfx->irq_src |= BIT(data->hwirq % 8);
+}
+
+static struct irq_chip stmfx_irq_chip = {
+ .name = "stmfx-core",
+ .irq_bus_lock = stmfx_irq_bus_lock,
+ .irq_bus_sync_unlock = stmfx_irq_bus_sync_unlock,
+ .irq_mask = stmfx_irq_mask,
+ .irq_unmask = stmfx_irq_unmask,
+};
+
+static irqreturn_t stmfx_irq_handler(int irq, void *data)
+{
+ struct stmfx *stmfx = data;
+ unsigned long n, pending;
+ u32 ack;
+ int ret;
+
+ ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING,
+ (u32 *)&pending);
+ if (ret)
+ return IRQ_NONE;
+
+ /*
+ * There is no ACK for GPIO: MFX_REG_IRQ_PENDING_GPIO is a logical OR
+ * of MFX_REG_IRQ_GPI_PENDING1/_PENDING2/_PENDING3.
+ */
+ ack = pending & ~BIT(STMFX_REG_IRQ_SRC_EN_GPIO);
+ if (ack) {
+ ret = regmap_write(stmfx->map, STMFX_REG_IRQ_ACK, ack);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ for_each_set_bit(n, &pending, STMFX_REG_IRQ_SRC_MAX)
+ handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n));
+
+ return IRQ_HANDLED;
+}
+
+static int stmfx_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_chip_and_handler(virq, &stmfx_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static void stmfx_irq_unmap(struct irq_domain *d, unsigned int virq)
+{
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static const struct irq_domain_ops stmfx_irq_ops = {
+ .map = stmfx_irq_map,
+ .unmap = stmfx_irq_unmap,
+};
+
+static void stmfx_irq_exit(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+ int hwirq;
+
+ for (hwirq = 0; hwirq < STMFX_REG_IRQ_SRC_MAX; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(stmfx->irq_domain, hwirq));
+
+ irq_domain_remove(stmfx->irq_domain);
+}
+
+static int stmfx_irq_init(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+ u32 irqoutpin = 0, irqtrigger;
+ int ret;
+
+ stmfx->irq_domain = irq_domain_add_simple(stmfx->dev->of_node,
+ STMFX_REG_IRQ_SRC_MAX, 0,
+ &stmfx_irq_ops, stmfx);
+ if (!stmfx->irq_domain) {
+ dev_err(stmfx->dev, "Failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+
+ if (!of_property_read_bool(stmfx->dev->of_node, "drive-open-drain"))
+ irqoutpin |= STMFX_REG_IRQ_OUT_PIN_TYPE;
+
+ irqtrigger = irq_get_trigger_type(client->irq);
+ if ((irqtrigger & IRQ_TYPE_EDGE_RISING) ||
+ (irqtrigger & IRQ_TYPE_LEVEL_HIGH))
+ irqoutpin |= STMFX_REG_IRQ_OUT_PIN_POL;
+
+ ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(stmfx->dev, client->irq,
+ NULL, stmfx_irq_handler,
+ irqtrigger | IRQF_ONESHOT,
+ "stmfx", stmfx);
+ if (ret)
+ stmfx_irq_exit(client);
+
+ return ret;
+}
+
+static int stmfx_chip_reset(struct stmfx *stmfx)
+{
+ int ret;
+
+ ret = regmap_write(stmfx->map, STMFX_REG_SYS_CTRL,
+ STMFX_REG_SYS_CTRL_SWRST);
+ if (ret)
+ return ret;
+
+ msleep(STMFX_BOOT_TIME_MS);
+
+ return ret;
+}
+
+static int stmfx_chip_init(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+ u32 id;
+ u8 version[2];
+ int ret;
+
+ stmfx->vdd = devm_regulator_get_optional(&client->dev, "vdd");
+ ret = PTR_ERR_OR_ZERO(stmfx->vdd);
+ if (ret == -ENODEV) {
+ stmfx->vdd = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else if (ret) {
+ dev_err(&client->dev, "Failed to get VDD regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (stmfx->vdd) {
+ ret = regulator_enable(stmfx->vdd);
+ if (ret) {
+ dev_err(&client->dev, "VDD enable failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_read(stmfx->map, STMFX_REG_CHIP_ID, &id);
+ if (ret) {
+ dev_err(&client->dev, "Error reading chip ID: %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Check that ID is the complement of the I2C address:
+ * STMFX I2C address follows the 7-bit format (MSB), that's why
+ * client->addr is shifted.
+ *
+ * STMFX_I2C_ADDR |        STMFX        |       Linux
+ *    input pin   | I2C device address  | I2C device address
+ * --------------------------------------------------------
+ *        0       | b: 1000 010x h:0x84 |        0x42
+ *        1       | b: 1000 011x h:0x86 |        0x43
+ */
+ if (FIELD_GET(STMFX_REG_CHIP_ID_MASK, ~id) != (client->addr << 1)) {
+ dev_err(&client->dev, "Unknown chip ID: %#x\n", id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_bulk_read(stmfx->map, STMFX_REG_FW_VERSION_MSB,
+ version, ARRAY_SIZE(version));
+ if (ret) {
+ dev_err(&client->dev, "Error reading FW version: %d\n", ret);
+ goto err;
+ }
+
+ dev_info(&client->dev, "STMFX id: %#x, fw version: %x.%02x\n",
+ id, version[0], version[1]);
+
+ ret = stmfx_chip_reset(stmfx);
+ if (ret) {
+ dev_err(&client->dev, "Failed to reset chip: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (stmfx->vdd)
+ return regulator_disable(stmfx->vdd);
+
+ return ret;
+}
+
+static int stmfx_chip_exit(struct i2c_client *client)
+{
+ struct stmfx *stmfx = i2c_get_clientdata(client);
+
+ regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, 0);
+ regmap_write(stmfx->map, STMFX_REG_SYS_CTRL, 0);
+
+ if (stmfx->vdd)
+ return regulator_disable(stmfx->vdd);
+
+ return 0;
+}
+
+static int stmfx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct stmfx *stmfx;
+ int ret;
+
+ stmfx = devm_kzalloc(dev, sizeof(*stmfx), GFP_KERNEL);
+ if (!stmfx)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, stmfx);
+
+ stmfx->dev = dev;
+
+ stmfx->map = devm_regmap_init_i2c(client, &stmfx_regmap_config);
+ if (IS_ERR(stmfx->map)) {
+ ret = PTR_ERR(stmfx->map);
+ dev_err(dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&stmfx->lock);
+
+ ret = stmfx_chip_init(client);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ return -EPROBE_DEFER;
+ return ret;
+ }
+
+ if (client->irq < 0) {
+ dev_err(dev, "Failed to get IRQ: %d\n", client->irq);
+ ret = client->irq;
+ goto err_chip_exit;
+ }
+
+ ret = stmfx_irq_init(client);
+ if (ret)
+ goto err_chip_exit;
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ stmfx_cells, ARRAY_SIZE(stmfx_cells), NULL,
+ 0, stmfx->irq_domain);
+ if (ret)
+ goto err_irq_exit;
+
+ return 0;
+
+err_irq_exit:
+ stmfx_irq_exit(client);
+err_chip_exit:
+ stmfx_chip_exit(client);
+
+ return ret;
+}
+
+static int stmfx_remove(struct i2c_client *client)
+{
+ stmfx_irq_exit(client);
+
+ return stmfx_chip_exit(client);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stmfx_suspend(struct device *dev)
+{
+ struct stmfx *stmfx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_raw_read(stmfx->map, STMFX_REG_SYS_CTRL,
+ &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_read(stmfx->map, STMFX_REG_IRQ_OUT_PIN,
+ &stmfx->bkp_irqoutpin,
+ sizeof(stmfx->bkp_irqoutpin));
+ if (ret)
+ return ret;
+
+ if (stmfx->vdd)
+ return regulator_disable(stmfx->vdd);
+
+ return 0;
+}
+
+static int stmfx_resume(struct device *dev)
+{
+ struct stmfx *stmfx = dev_get_drvdata(dev);
+ int ret;
+
+ if (stmfx->vdd) {
+ ret = regulator_enable(stmfx->vdd);
+ if (ret) {
+ dev_err(stmfx->dev,
+ "VDD enable failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL,
+ &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN,
+ &stmfx->bkp_irqoutpin,
+ sizeof(stmfx->bkp_irqoutpin));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_SRC_EN,
+ &stmfx->irq_src, sizeof(stmfx->irq_src));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stmfx_dev_pm_ops, stmfx_suspend, stmfx_resume);
+
+static const struct of_device_id stmfx_of_match[] = {
+ { .compatible = "st,stmfx-0300", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stmfx_of_match);
+
+static struct i2c_driver stmfx_driver = {
+ .driver = {
+ .name = "stmfx-core",
+ .of_match_table = of_match_ptr(stmfx_of_match),
+ .pm = &stmfx_dev_pm_ops,
+ },
+ .probe = stmfx_probe,
+ .remove = stmfx_remove,
+};
+module_i2c_driver(stmfx_driver);
+
+MODULE_DESCRIPTION("STMFX core driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 2b658bed47db..2f12a415b807 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -148,13 +148,12 @@ static const struct of_device_id sun6i_prcm_dt_ids[] = {
static int sun6i_prcm_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
const struct prcm_data *data;
struct resource *res;
int ret;
- match = of_match_node(sun6i_prcm_dt_ids, np);
+ match = of_match_node(sun6i_prcm_dt_ids, pdev->dev.of_node);
if (!match)
return -EINVAL;
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 0ecdffb3d967..f6922a0f8058 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>
#include <linux/io.h>
@@ -45,6 +46,7 @@ static const struct regmap_config syscon_regmap_config = {
static struct syscon *of_syscon_register(struct device_node *np)
{
+ struct clk *clk;
struct syscon *syscon;
struct regmap *regmap;
void __iomem *base;
@@ -119,6 +121,18 @@ static struct syscon *of_syscon_register(struct device_node *np)
goto err_regmap;
}
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ /* clock is optional */
+ if (ret != -ENOENT)
+ goto err_clk;
+ } else {
+ ret = regmap_mmio_attach_clk(regmap, clk);
+ if (ret)
+ goto err_attach;
+ }
+
syscon->regmap = regmap;
syscon->np = np;
@@ -128,6 +142,11 @@ static struct syscon *of_syscon_register(struct device_node *np)
return syscon;
+err_attach:
+ if (!IS_ERR(clk))
+ clk_put(clk);
+err_clk:
+ regmap_exit(regmap);
err_regmap:
iounmap(base);
err_map:
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 43d8683266de..e9cfb147345e 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -82,8 +82,7 @@ struct t7l66xb {
static int t7l66xb_mmc_enable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
unsigned long flags;
u8 dev_ctl;
int ret;
@@ -108,8 +107,7 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc)
static int t7l66xb_mmc_disable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
unsigned long flags;
u8 dev_ctl;
@@ -128,16 +126,14 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
}
static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+ struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
}
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 85fab3729102..f417c6fecfe2 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -80,16 +80,14 @@ static int tc6387xb_resume(struct platform_device *dev)
static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_pwr(tc6387xb->scr + 0x200, 0, state);
}
static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_clk_div(tc6387xb->scr + 0x200, 0, state);
}
@@ -97,8 +95,7 @@ static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
static int tc6387xb_mmc_enable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
clk_prepare_enable(tc6387xb->clk32k);
@@ -110,8 +107,7 @@ static int tc6387xb_mmc_enable(struct platform_device *mmc)
static int tc6387xb_mmc_disable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
clk_disable_unprepare(tc6387xb->clk32k);
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 0c9f0390e891..6943048a64c2 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -122,14 +122,13 @@ enum {
static int tc6393xb_nand_enable(struct platform_device *nand)
{
- struct platform_device *dev = to_platform_device(nand->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(nand->dev.parent);
unsigned long flags;
raw_spin_lock_irqsave(&tc6393xb->lock, flags);
/* SMD buffer on */
- dev_dbg(&dev->dev, "SMD buffer on\n");
+ dev_dbg(nand->dev.parent, "SMD buffer on\n");
tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
@@ -312,8 +311,7 @@ static int tc6393xb_fb_disable(struct platform_device *dev)
int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
{
- struct platform_device *dev = to_platform_device(fb->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(fb->dev.parent);
u8 fer;
unsigned long flags;
@@ -334,8 +332,7 @@ EXPORT_SYMBOL(tc6393xb_lcd_set_power);
int tc6393xb_lcd_mode(struct platform_device *fb,
const struct fb_videomode *mode) {
- struct platform_device *dev = to_platform_device(fb->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(fb->dev.parent);
unsigned long flags;
raw_spin_lock_irqsave(&tc6393xb->lock, flags);
@@ -351,8 +348,7 @@ EXPORT_SYMBOL(tc6393xb_lcd_mode);
static int tc6393xb_mmc_enable(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
tc6393xb_mmc_resources[0].start & 0xfffe);
@@ -362,8 +358,7 @@ static int tc6393xb_mmc_enable(struct platform_device *mmc)
static int tc6393xb_mmc_resume(struct platform_device *mmc)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
tc6393xb_mmc_resources[0].start & 0xfffe);
@@ -373,16 +368,14 @@ static int tc6393xb_mmc_resume(struct platform_device *mmc)
static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
}
static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
{
- struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
}
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 3bd75061f777..f78be039e463 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = {
{ .compatible = "ti,tps65912", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table);
static int tps65912_spi_probe(struct spi_device *spi)
{
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 7c3c5fd5fcd0..86052c5c6069 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
}
}
+ /*
+ * Register access can produce errors after power-up unless we
+ * wait at least 8ms based on measurements on duovero.
+ */
+ usleep_range(10000, 12000);
+
/* Sync with the HW */
- regcache_sync(twl6040->regmap);
+ ret = regcache_sync(twl6040->regmap);
+ if (ret) {
+ dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
+ ret);
+ goto out;
+ }
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3209ee020b15..b80cb6af0cb4 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,30 +496,6 @@ config VEXPRESS_SYSCFG
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
-config ASPEED_P2A_CTRL
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
- help
- Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
- ioctl()s, the driver also provides an interface for userspace mappings to
- a pre-defined region.
-
-config ASPEED_LPC_CTRL
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
- ---help---
- Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
- ioctl()s, the driver also provides a read/write interface to a BMC ram
- region where the host LPC read/write region can be buffered.
-
-config ASPEED_LPC_SNOOP
- tristate "Aspeed ast2500 HOST LPC snoop support"
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
- help
- Provides a driver to control the LPC snoop interface which
- allows the BMC to listen on and save the data written by
- the host to an arbitrary LPC I/O port.
-
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c36239573a5c..b9affcdaa3d6 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -54,9 +54,6 @@ obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
-obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
-obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
-obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 25265fd0fd6e..89cff9d1012b 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -603,7 +603,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- m->write, /* readable/writable */
+ m->write ? FOLL_WRITE : 0, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
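
Several callers in this series switch the third argument of get_user_pages_fast() from the old boolean "write" flag to FOLL_* gup flags. A hedged sketch of the updated calling convention (helper and buffer names are illustrative):

#include <linux/mm.h>
#include <linux/types.h>

/*
 * Pin nr_pages of a user buffer.  The third argument is a gup_flags bitmask:
 * pass FOLL_WRITE only when the pinned pages will be written to, 0 for
 * read-only access.  Returns the number of pages pinned or a negative error.
 */
static int example_pin_user_buf(unsigned long uaddr, int nr_pages,
                                bool writable, struct page **pages)
{
        return get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
                                   writable ? FOLL_WRITE : 0, pages);
}
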
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 29582fe57151..7b015f2a1c6f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -75,6 +75,11 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+#define PCI_DEVICE_ID_TI_AM654 0xb00c
+
+#define is_am654_pci_dev(pdev) \
+ ((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -588,6 +593,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
int ret = -EINVAL;
enum pci_barno bar;
struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+ struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
switch (cmd) {
@@ -595,6 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
bar = arg;
if (bar < 0 || bar > 5)
goto ret;
+ if (is_am654_pci_dev(pdev) && bar == BAR_0)
+ goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
case PCITEST_LEGACY_IRQ:
@@ -662,6 +670,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
+ test->test_reg_bar = test_reg_bar;
test->alignment = data->alignment;
irq_type = data->irq_type;
}
@@ -785,11 +794,20 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static const struct pci_endpoint_test_data am654_data = {
+ .test_reg_bar = BAR_2,
+ .alignment = SZ_64K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
+ .driver_data = (kernel_ulong_t)&am654_data
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
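
The AM654 entry above attaches per-variant parameters through pci_device_id.driver_data, which the probe path reads back from the matched ID entry. A minimal sketch of that retrieval, using a hypothetical parameter struct rather than the driver's own:

#include <linux/pci.h>

struct example_variant_data {           /* hypothetical per-variant parameters */
        int test_reg_bar;
        unsigned long alignment;
};

/*
 * 'ent' is the pci_device_id entry that matched this device; its driver_data
 * field carries the pointer stored in the ID table, or 0 for entries that
 * did not set one.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct example_variant_data *data =
                (const struct example_variant_data *)ent->driver_data;

        if (data)       /* entries without driver_data fall back to defaults */
                dev_info(&pdev->dev, "reg BAR %d, alignment %lu\n",
                         data->test_reg_bar, data->alignment);
        return 0;
}
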
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 997f92543dd4..422d08da3244 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -242,7 +242,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
/*
* Lock physical page backing a given user VA.
*/
- retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
+ retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
if (retval != 1) {
context->notify_page = NULL;
return VMCI_ERROR_GENERIC;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f5f1aac9d163..1174735f003d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -659,7 +659,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
int err = VMCI_SUCCESS;
retval = get_user_pages_fast((uintptr_t) produce_uva,
- produce_q->kernel_if->num_pages, 1,
+ produce_q->kernel_if->num_pages,
+ FOLL_WRITE,
produce_q->kernel_if->u.h.header_page);
if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
@@ -671,7 +672,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
}
retval = get_user_pages_fast((uintptr_t) consume_uva,
- consume_q->kernel_if->num_pages, 1,
+ consume_q->kernel_if->num_pages,
+ FOLL_WRITE,
consume_q->kernel_if->u.h.header_page);
if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ec980bda071c..b61de360f26f 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index e22bbff89c8d..9cb93e15b197 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -24,7 +24,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/unaligned.h>
#include "mvsdio.h"
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c1d3f0e38921..e7d80c83da2c 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -35,7 +35,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 79a8ff542883..fb31a7f649a3 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -60,22 +60,6 @@ config MTD_CMDLINE_PARTS
If unsure, say 'N'.
-config MTD_AFS_PARTS
- tristate "ARM Firmware Suite partition parsing"
- depends on (ARM || ARM64)
- help
- The ARM Firmware Suite allows the user to divide flash devices into
- multiple 'images'. Each such image has a header containing its name
- and offset/size etc.
-
- If you need code which can detect and parse these tables, and
- register MTD 'partitions' corresponding to each image detected,
- enable this option.
-
- You will still need the parsing functions to be called by the driver
- for your particular device. It won't happen automatically. The
- 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
-
config MTD_OF_PARTS
tristate "OpenFirmware partitioning information support"
default y
@@ -94,6 +78,7 @@ config MTD_BCM63XX_PARTS
tristate "BCM63XX CFE partitioning support"
depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
select CRC32
+ select MTD_PARSER_IMAGETAG
help
This provides partition parsing for BCM63xx devices with CFE
bootloaders.
@@ -230,12 +215,11 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
-
config SM_FTL
tristate "SmartMedia/xD new translation layer"
depends on BLOCK
select MTD_BLKDEVS
- select MTD_NAND_ECC
+ select MTD_NAND_ECC_SW_HAMMING
help
This enables EXPERIMENTAL R/W support for SmartMedia/xD
FTL (Flash translation layer).
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 58fc327a5276..806287e80e84 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -9,7 +9,6 @@ mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
-obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
deleted file mode 100644
index d61b7edfc938..000000000000
--- a/drivers/mtd/afs.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*======================================================================
-
- drivers/mtd/afs.c: ARM Flash Layout/Partitioning
-
- Copyright © 2000 ARM Limited
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- This is access code for flashes using ARM's flash partitioning
- standards.
-
-======================================================================*/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#define AFSV1_FOOTER_MAGIC 0xA0FFFF9F
-
-struct footer_v1 {
- u32 image_info_base; /* Address of first word of ImageFooter */
- u32 image_start; /* Start of area reserved by this footer */
- u32 signature; /* 'Magic' number proves it's a footer */
- u32 type; /* Area type: ARM Image, SIB, customer */
- u32 checksum; /* Just this structure */
-};
-
-struct image_info_v1 {
- u32 bootFlags; /* Boot flags, compression etc. */
- u32 imageNumber; /* Unique number, selects for boot etc. */
- u32 loadAddress; /* Address program should be loaded to */
- u32 length; /* Actual size of image */
- u32 address; /* Image is executed from here */
- char name[16]; /* Null terminated */
- u32 headerBase; /* Flash Address of any stripped header */
- u32 header_length; /* Length of header in memory */
- u32 headerType; /* AIF, RLF, s-record etc. */
- u32 checksum; /* Image checksum (inc. this struct) */
-};
-
-static u32 word_sum(void *words, int num)
-{
- u32 *p = words;
- u32 sum = 0;
-
- while (num--)
- sum += *p++;
-
- return sum;
-}
-
-static int
-afs_read_footer_v1(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
- u_int off, u_int mask)
-{
- struct footer_v1 fs;
- u_int ptr = off + mtd->erasesize - sizeof(fs);
- size_t sz;
- int ret;
-
- ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
- if (ret >= 0 && sz != sizeof(fs))
- ret = -EINVAL;
-
- if (ret < 0) {
- printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
- ptr, ret);
- return ret;
- }
-
- /*
- * Does it contain the magic number?
- */
- if (fs.signature != AFSV1_FOOTER_MAGIC)
- return 0;
-
- /*
- * Check the checksum.
- */
- if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
- return 0;
-
- /*
- * Don't touch the SIB.
- */
- if (fs.type == 2)
- return 0;
-
- *iis_start = fs.image_info_base & mask;
- *img_start = fs.image_start & mask;
-
- /*
- * Check the image info base. This can not
- * be located after the footer structure.
- */
- if (*iis_start >= ptr)
- return 0;
-
- /*
- * Check the start of this image. The image
- * data can not be located after this block.
- */
- if (*img_start > off)
- return 0;
-
- return 1;
-}
-
-static int
-afs_read_iis_v1(struct mtd_info *mtd, struct image_info_v1 *iis, u_int ptr)
-{
- size_t sz;
- int ret, i;
-
- memset(iis, 0, sizeof(*iis));
- ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
- if (ret < 0)
- goto failed;
-
- if (sz != sizeof(*iis)) {
- ret = -EINVAL;
- goto failed;
- }
-
- ret = 0;
-
- /*
- * Validate the name - it must be NUL terminated.
- */
- for (i = 0; i < sizeof(iis->name); i++)
- if (iis->name[i] == '\0')
- break;
-
- if (i < sizeof(iis->name))
- ret = 1;
-
- return ret;
-
- failed:
- printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
- ptr, ret);
- return ret;
-}
-
-static int parse_afs_partitions(struct mtd_info *mtd,
- const struct mtd_partition **pparts,
- struct mtd_part_parser_data *data)
-{
- struct mtd_partition *parts;
- u_int mask, off, idx, sz;
- int ret = 0;
- char *str;
-
- /*
- * This is the address mask; we use this to mask off out of
- * range address bits.
- */
- mask = mtd->size - 1;
-
- /*
- * First, calculate the size of the array we need for the
- * partition information. We include in this the size of
- * the strings.
- */
- for (idx = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
- struct image_info_v1 iis;
- u_int iis_ptr, img_ptr;
-
- ret = afs_read_footer_v1(mtd, &img_ptr, &iis_ptr, off, mask);
- if (ret < 0)
- break;
- if (ret) {
- ret = afs_read_iis_v1(mtd, &iis, iis_ptr);
- if (ret < 0)
- break;
- if (ret == 0)
- continue;
-
- sz += sizeof(struct mtd_partition);
- sz += strlen(iis.name) + 1;
- idx += 1;
- }
- }
-
- if (!sz)
- return ret;
-
- parts = kzalloc(sz, GFP_KERNEL);
- if (!parts)
- return -ENOMEM;
-
- str = (char *)(parts + idx);
-
- /*
- * Identify the partitions
- */
- for (idx = off = 0; off < mtd->size; off += mtd->erasesize) {
- struct image_info_v1 iis;
- u_int iis_ptr, img_ptr;
-
- /* Read the footer. */
- ret = afs_read_footer_v1(mtd, &img_ptr, &iis_ptr, off, mask);
- if (ret < 0)
- break;
- if (ret == 0)
- continue;
-
- /* Read the image info block */
- ret = afs_read_iis_v1(mtd, &iis, iis_ptr);
- if (ret < 0)
- break;
- if (ret == 0)
- continue;
-
- strcpy(str, iis.name);
-
- parts[idx].name = str;
- parts[idx].size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
- parts[idx].offset = img_ptr;
- parts[idx].mask_flags = 0;
-
- printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n",
- idx, img_ptr, parts[idx].size / 1024,
- iis.imageNumber, str);
-
- idx += 1;
- str = str + strlen(iis.name) + 1;
- }
-
- if (!idx) {
- kfree(parts);
- parts = NULL;
- }
-
- *pparts = parts;
- return idx ? idx : ret;
-}
-
-static struct mtd_part_parser afs_parser = {
- .parse_fn = parse_afs_partitions,
- .name = "afs",
-};
-module_mtd_part_parser(afs_parser);
-
-MODULE_AUTHOR("ARM Ltd");
-MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 41d1d3149c61..b2bd04764e95 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
@@ -93,51 +94,19 @@ static int bcm63xx_read_nvram(struct mtd_info *master,
return 0;
}
-static int bcm63xx_read_image_tag(struct mtd_info *master, const char *name,
- loff_t tag_offset, struct bcm_tag *buf)
-{
- int ret;
- size_t retlen;
- u32 computed_crc;
-
- ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
- if (ret)
- return ret;
-
- if (retlen != sizeof(*buf))
- return -EIO;
-
- computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
- offsetof(struct bcm_tag, header_crc));
- if (computed_crc == buf->header_crc) {
- STR_NULL_TERMINATE(buf->board_id);
- STR_NULL_TERMINATE(buf->tag_version);
-
- pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
- name, tag_offset, buf->tag_version, buf->board_id);
-
- return 0;
- }
-
- pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
- name, tag_offset, buf->header_crc, computed_crc);
- return 1;
-}
+static const char * const bcm63xx_cfe_part_types[] = {
+ "bcm963xx-imagetag",
+ NULL,
+};
static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf = NULL;
struct mtd_partition *parts;
- int ret;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
+ int nrparts = 3, curpart = 0;
unsigned int cfelen, nvramlen;
unsigned int cfe_erasesize;
int i;
- bool rootfs_first = false;
cfe_erasesize = max_t(uint32_t, master->erasesize,
BCM963XX_CFE_BLOCK_SIZE);
@@ -146,83 +115,9 @@ static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
nvramlen = nvram->psi_size * SZ_1K;
nvramlen = roundup(nvramlen, cfe_erasesize);
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = bcm63xx_read_image_tag(master, "rootfs", cfelen, buf);
- if (!ret) {
- STR_NULL_TERMINATE(buf->flash_image_start);
- if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
- rootfsaddr < BCM963XX_EXTENDED_SIZE) {
- pr_err("invalid rootfs address: %*ph\n",
- (int)sizeof(buf->flash_image_start),
- buf->flash_image_start);
- goto invalid_tag;
- }
-
- STR_NULL_TERMINATE(buf->kernel_address);
- if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
- kerneladdr < BCM963XX_EXTENDED_SIZE) {
- pr_err("invalid kernel address: %*ph\n",
- (int)sizeof(buf->kernel_address),
- buf->kernel_address);
- goto invalid_tag;
- }
-
- STR_NULL_TERMINATE(buf->kernel_length);
- if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
- pr_err("invalid kernel length: %*ph\n",
- (int)sizeof(buf->kernel_length),
- buf->kernel_length);
- goto invalid_tag;
- }
-
- STR_NULL_TERMINATE(buf->total_length);
- if (kstrtouint(buf->total_length, 10, &totallen)) {
- pr_err("invalid total length: %*ph\n",
- (int)sizeof(buf->total_length),
- buf->total_length);
- goto invalid_tag;
- }
-
- kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE;
- rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE;
- spareaddr = roundup(totallen, master->erasesize) + cfelen;
-
- if (rootfsaddr < kerneladdr) {
- /* default Broadcom layout */
- rootfslen = kerneladdr - rootfsaddr;
- rootfs_first = true;
- } else {
- /* OpenWrt layout */
- rootfsaddr = kerneladdr + kernellen;
- rootfslen = spareaddr - rootfsaddr;
- }
- } else if (ret > 0) {
-invalid_tag:
- kernellen = 0;
- rootfslen = 0;
- rootfsaddr = 0;
- spareaddr = cfelen;
- } else {
- goto out;
- }
- sparelen = master->size - spareaddr - nvramlen;
-
- /* Determine number of partitions */
- if (rootfslen > 0)
- nrparts++;
-
- if (kernellen > 0)
- nrparts++;
-
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!parts)
+ return -ENOMEM;
/* Start building partition list */
parts[curpart].name = "CFE";
@@ -230,30 +125,6 @@ invalid_tag:
parts[curpart].size = cfelen;
curpart++;
- if (kernellen > 0) {
- int kernelpart = curpart;
-
- if (rootfslen > 0 && rootfs_first)
- kernelpart++;
- parts[kernelpart].name = "kernel";
- parts[kernelpart].offset = kerneladdr;
- parts[kernelpart].size = kernellen;
- curpart++;
- }
-
- if (rootfslen > 0) {
- int rootfspart = curpart;
-
- if (kernellen > 0 && rootfs_first)
- rootfspart--;
- parts[rootfspart].name = "rootfs";
- parts[rootfspart].offset = rootfsaddr;
- parts[rootfspart].size = rootfslen;
- if (sparelen > 0 && !rootfs_first)
- parts[rootfspart].size += sparelen;
- curpart++;
- }
-
parts[curpart].name = "nvram";
parts[curpart].offset = master->size - nvramlen;
parts[curpart].size = nvramlen;
@@ -263,22 +134,13 @@ invalid_tag:
parts[curpart].name = "linux";
parts[curpart].offset = cfelen;
parts[curpart].size = master->size - cfelen - nvramlen;
+ parts[curpart].types = bcm63xx_cfe_part_types;
for (i = 0; i < nrparts; i++)
pr_info("Partition %d is %s offset %llx and length %llx\n", i,
parts[i].name, parts[i].offset, parts[i].size);
- pr_info("Spare partition is offset %x and length %x\n", spareaddr,
- sparelen);
-
*pparts = parts;
- ret = 0;
-
-out:
- vfree(buf);
-
- if (ret)
- return ret;
return nrparts;
}
@@ -311,9 +173,16 @@ out:
return ret;
};
+static const struct of_device_id parse_bcm63xx_cfe_match_table[] = {
+ { .compatible = "brcm,bcm963xx-cfe-nor-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_bcm63xx_cfe_match_table);
+
static struct mtd_part_parser bcm63xx_cfe_parser = {
.parse_fn = bcm63xx_parse_cfe_partitions,
.name = "bcm63xxpart",
+ .of_match_table = parse_bcm63xx_cfe_match_table,
};
module_mtd_part_parser(bcm63xx_cfe_parser);
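
The bcm63xxpart changes delegate image-tag parsing to a separate parser selected by name through parts[].types. A hedged skeleton of an MTD partition parser using the same registration interface as above (the parse logic itself is a placeholder):

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int example_parse(struct mtd_info *master,
                         const struct mtd_partition **pparts,
                         struct mtd_part_parser_data *data)
{
        /*
         * Inspect 'master', allocate an array of struct mtd_partition,
         * store it in *pparts and return the number of partitions found
         * (or a negative error code).
         */
        return 0;
}

static struct mtd_part_parser example_parser = {
        .parse_fn = example_parse,
        .name = "example-parser",       /* the name a partition lists in parts[].types */
};
module_mtd_part_parser(example_parser);

MODULE_LICENSE("GPL");
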
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 7b7286b4d81e..c8fa5906bdf9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -869,6 +869,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
+ /* fall through */
default:
sleep:
@@ -2751,6 +2752,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
+ /* fall through */
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 6f16552cd59f..e3b266ee06af 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -109,10 +109,13 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
case 8:
onecmd |= (onecmd << (chip_mode * 32));
#endif
+ /* fall through */
case 4:
onecmd |= (onecmd << (chip_mode * 16));
+ /* fall through */
case 2:
onecmd |= (onecmd << (chip_mode * 8));
+ /* fall through */
case 1:
;
}
@@ -162,10 +165,13 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
case 8:
res |= (onestat >> (chip_mode * 32));
#endif
+ /* fall through */
case 4:
res |= (onestat >> (chip_mode * 16));
+ /* fall through */
case 2:
res |= (onestat >> (chip_mode * 8));
+ /* fall through */
case 1:
;
}
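
The /* fall through */ markers added above document intentional case fall-through so -Wimplicit-fallthrough stays quiet. A small illustrative sketch of the same cascading pattern, independent of the CFI code:

/*
 * Each wider case deliberately falls through to the narrower ones below it,
 * accumulating the shift; the comments mark the fall-through as intended.
 */
static unsigned int example_scale_shift(unsigned int width)
{
        unsigned int shift = 0;

        switch (width) {
        case 8:
                shift += 8;
                /* fall through */
        case 4:
                shift += 8;
                /* fall through */
        case 2:
                shift += 8;
                /* fall through */
        case 1:
                break;
        }

        return shift;
}
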
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index aa983422aa97..f9258d666846 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS if !MTD_NAND_BCH
+ select BCH_CONST_PARAMS if !MTD_NAND_ECC_SW_BCH
select BITREVERSE
help
This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 9ee04b5f9311..8a8627c30aed 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -147,8 +147,10 @@ static int parse_num64(uint64_t *num64, char *token)
switch (token[len - 2]) {
case 'G':
shift += 10;
+ /* fall through */
case 'M':
shift += 10;
+ /* fall through */
case 'k':
shift += 10;
token[len - 2] = 0;
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index b13557fe52bd..76a4c73e100e 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -318,6 +318,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
+ /* fall through */
default:
sleep:
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index e0cf869c8544..544ed1931843 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -10,7 +10,7 @@ config MTD_COMPLEX_MAPPINGS
config MTD_PHYSMAP
tristate "Flash device in physical memory map"
- depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
+ depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM || MTD_LPDDR
help
This provides a 'mapping' driver which allows the NOR Flash and
ROM driver code to communicate with chips which are mapped
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index d9a3e4bebe5d..21b556afc305 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -132,6 +132,8 @@ static void physmap_set_addr_gpios(struct physmap_flash_info *info,
gpiod_set_value(info->gpios->desc[i], !!(BIT(i) & ofs));
}
+
+ info->gpio_values = ofs;
}
#define win_mask(order) (BIT(order) - 1)
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index 60775b208fc9..a289c8b5cabf 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -86,7 +86,7 @@ static void gemini_flash_disable_pins(void)
static map_word __xipram gemini_flash_map_read(struct map_info *map,
unsigned long ofs)
{
- map_word __xipram ret;
+ map_word ret;
gemini_flash_enable_pins();
ret = inline_map_read(map, ofs);
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fd5fe12d7461..893239629d6b 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -20,7 +20,7 @@
#include <linux/mtd/concat.h>
#include <mach/hardware.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/flash.h>
struct sa_subdev_info {
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index aef030ca8601..de4c46318abb 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -31,13 +31,7 @@
#define MAP_NAME "ram"
#endif
-/*
- * Blackfin uses uclinux_ram_map during startup, so it must not be static.
- * Provide a dummy declaration to make sparse happy.
- */
-extern struct map_info uclinux_ram_map;
-
-struct map_info uclinux_ram_map = {
+static struct map_info uclinux_ram_map = {
.name = MAP_NAME,
.size = 0,
};
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 37f174ccbcec..dfa241ad018b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -572,7 +572,7 @@ static ssize_t mtd_partition_offset_show(struct device *dev,
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_part *part = mtd_to_part(mtd);
- return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 9033215e62ea..495751ed3fd7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,6 +2,5 @@ config MTD_NAND_CORE
tristate
source "drivers/mtd/nand/onenand/Kconfig"
-
source "drivers/mtd/nand/raw/Kconfig"
source "drivers/mtd/nand/spi/Kconfig"
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index 9c9f8936b63b..b6de955ac8bf 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -174,6 +174,40 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
/**
+ * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
+ * a specific region of the NAND device
+ * @mtd: MTD device
+ * @offs: offset of the NAND region
+ * @len: length of the NAND region
+ *
+ * Default implementation for mtd->_max_bad_blocks(). Only works if
+ * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
+ *
+ * Return: a positive number encoding the maximum number of eraseblocks on a
+ * portion of memory, a negative error code otherwise.
+ */
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, end;
+ unsigned int max_bb = 0;
+
+ if (!nand->memorg.max_bad_eraseblocks_per_lun)
+ return -ENOTSUPP;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_offs_to_pos(nand, offs + len, &end);
+
+ for (nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_pos_cmp(&pos, &end) < 0;
+ nanddev_pos_next_lun(nand, &pos))
+ max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
+
+ return max_bb;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
+
+/**
* nanddev_init() - Initialize a NAND device
* @nand: NAND device
* @ops: NAND device operations
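
A sketch of how a user of the new helper might size a worst-case reserve, assuming mtd_max_bad_blocks() is wired to the device's _max_bad_blocks hook (for generic NAND, the default added above):

#include <linux/mtd/mtd.h>

/*
 * Worst-case number of eraseblocks to set aside for a region; falls back to
 * zero when the device cannot provide a figure.
 */
static int example_reserve_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        int max_bb = mtd_max_bad_blocks(mtd, ofs, len);

        if (max_bb < 0)         /* e.g. -ENOTSUPP when no figure is available */
                max_bb = 0;

        return max_bb;
}
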
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 4ca4b194e7d7..f41d76248550 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -2458,7 +2458,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we don't have to mess with 16-bit access */
- ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
+ ofs += mtd->oobsize + (this->badblockpos & ~0x01);
/* FIXME : What to do when marking SLC block in partition
* with MLC erasesize? For now, it is not advisable to
* create partitions containing both SLC and MLC regions.
@@ -3967,6 +3967,9 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
this->unlock_all(mtd);
+ /* Set the bad block marker position */
+ this->badblockpos = ONENAND_BADBLOCK_POS;
+
ret = this->scan_bbt(mtd);
if ((!FLEXONENAND(this)) || ret)
return ret;
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index dde20487937d..57c31c81be18 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -190,9 +190,6 @@ static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
if (!bbm->bbt)
return -ENOMEM;
- /* Set the bad block position */
- bbm->badblockpos = ONENAND_BADBLOCK_POS;
-
/* Set erase shift */
bbm->bbt_erase_shift = this->erase_shift;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e604625e2dfa..0500c42f31cb 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -1,34 +1,29 @@
-config MTD_NAND_ECC
+config MTD_NAND_ECC_SW_HAMMING
tristate
-config MTD_NAND_ECC_SMC
+config MTD_NAND_ECC_SW_HAMMING_SMC
bool "NAND ECC Smart Media byte order"
- depends on MTD_NAND_ECC
+ depends on MTD_NAND_ECC_SW_HAMMING
default n
help
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
-
-menuconfig MTD_NAND
+menuconfig MTD_RAW_NAND
tristate "Raw/Parallel NAND Device Support"
depends on MTD
- select MTD_NAND_ECC
+ select MTD_NAND_CORE
+ select MTD_NAND_ECC_SW_HAMMING
help
This enables support for accessing all type of raw/parallel
NAND flash devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
-if MTD_NAND
+if MTD_RAW_NAND
-config MTD_NAND_BCH
- tristate
- select BCH
- depends on MTD_NAND_ECC_BCH
- default MTD_NAND
-
-config MTD_NAND_ECC_BCH
+config MTD_NAND_ECC_SW_BCH
bool "Support software BCH ECC"
+ select BCH
default n
help
This enables support for software BCH error correction. Binary BCH
@@ -36,15 +31,13 @@ config MTD_NAND_ECC_BCH
ECC codes. They are used with NAND devices requiring more than 1 bit
of error correction.
-config MTD_SM_COMMON
- tristate
- default n
+comment "Raw/parallel NAND flash controllers"
config MTD_NAND_DENALI
tristate
config MTD_NAND_DENALI_PCI
- tristate "Support Denali NAND controller on Intel Moorestown"
+ tristate "Denali NAND controller on Intel Moorestown"
select MTD_NAND_DENALI
depends on PCI
help
@@ -52,31 +45,22 @@ config MTD_NAND_DENALI_PCI
Denali NAND controller core.
config MTD_NAND_DENALI_DT
- tristate "Support Denali NAND controller as a DT device"
+ tristate "Denali NAND controller as a DT device"
select MTD_NAND_DENALI
depends on HAS_DMA && HAVE_CLK && OF
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
-config MTD_NAND_GPIO
- tristate "GPIO assisted NAND Flash driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables a NAND flash driver where control signals are
- connected to GPIO pins, and commands and data are communicated
- via a memory mapped interface.
-
config MTD_NAND_AMS_DELTA
- tristate "NAND Flash device on Amstrad E3"
+ tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
default y
help
Support for NAND flash on Amstrad E3 (Delta).
config MTD_NAND_OMAP2
- tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
+ tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -98,18 +82,6 @@ config MTD_NAND_OMAP_BCH
config MTD_NAND_OMAP_BCH_BUILD
def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
-config MTD_NAND_RICOH
- tristate "Ricoh xD card reader"
- default n
- depends on PCI
- select MTD_SM_COMMON
- help
- Enable support for Ricoh R5C852 xD card reader
- You also need to enable ether
- NAND SSFDC (SmartMedia) read only translation layer' or new
- expermental, readwrite
- 'SmartMedia/xD new translation layer'
-
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on MIPS_ALCHEMY
@@ -117,8 +89,15 @@ config MTD_NAND_AU1550
This enables the driver for the NAND flash controller on the
AMD/Alchemy 1550 SOC.
+config MTD_NAND_NDFC
+ tristate "IBM/MCC 4xx NAND controller"
+ depends on 4xx
+ select MTD_NAND_ECC_SW_HAMMING_SMC
+ help
+ NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs.
+
config MTD_NAND_S3C2410
- tristate "NAND Flash support for Samsung S3C SoCs"
+ tristate "Samsung S3C NAND controller"
depends on ARCH_S3C24XX || ARCH_S3C64XX
help
This enables the NAND flash controller on the S3C24xx and S3C64xx
@@ -128,18 +107,11 @@ config MTD_NAND_S3C2410
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "Samsung S3C NAND driver debug"
+ bool "Samsung S3C NAND controller debug"
depends on MTD_NAND_S3C2410
help
Enable debugging of the S3C NAND driver
-config MTD_NAND_NDFC
- tristate "NDFC NanD Flash Controller"
- depends on 4xx
- select MTD_NAND_ECC_SMC
- help
- NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
-
config MTD_NAND_S3C2410_CLKSTOP
bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
@@ -151,89 +123,19 @@ config MTD_NAND_S3C2410_CLKSTOP
approximately 5mA of power when there is nothing happening.
config MTD_NAND_TANGO
- tristate "NAND Flash support for Tango chips"
+ tristate "Tango NAND controller"
depends on ARCH_TANGO || COMPILE_TEST
depends on HAS_IOMEM
help
Enables the NAND Flash controller on Tango chips.
-config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
- depends on HAS_IOMEM
- select REED_SOLOMON
- select REED_SOLOMON_DEC16
- help
- This is a reimplementation of M-Systems DiskOnChip 2000,
- Millennium and Millennium Plus as a standard NAND device driver,
- as opposed to the earlier self-contained MTD device drivers.
- This should enable, among other things, proper JFFS2 operation on
- these devices.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_NAND_DISKONCHIP
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- depends on MTD_NAND_DISKONCHIP
- default "0"
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
-
-config MTD_NAND_DISKONCHIP_PROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
-
-config MTD_NAND_DISKONCHIP_BBTWRITE
- bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
- depends on MTD_NAND_DISKONCHIP
- help
- On DiskOnChip devices shipped with the INFTL filesystem (Millennium
- and 2000 TSOP/Alon), Linux reserves some space at the end of the
- device for the Bad Block Table (BBT). If you have existing INFTL
- data on your device (created by non-Linux tools such as M-Systems'
- DOS drivers), your data might overlap the area Linux wants to use for
- the BBT. If this is a concern for you, leave this option disabled and
- Linux will not write BBT data into this area.
- The downside of leaving this option disabled is that if bad blocks
- are detected by Linux, they will not be recorded in the BBT, which
- could cause future problems.
- Once you enable this option, new filesystems (INFTL or others, created
- in Linux or other operating systems) will not use the reserved area.
- The only reason not to enable this option is to prevent damage to
- preexisting filesystems.
- Even if you leave this disabled, you can enable BBT writes at module
- load time (assuming you build diskonchip as a module) with the module
- parameter "inftl_bbt_write=1".
-
config MTD_NAND_SHARPSL
- tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+ tristate "Sharp SL Series (C7xx + others) NAND controller"
depends on ARCH_PXA || COMPILE_TEST
depends on HAS_IOMEM
config MTD_NAND_CAFE
- tristate "NAND support for OLPC CAFÉ chip"
+ tristate "OLPC CAFÉ NAND controller"
depends on PCI
select REED_SOLOMON
select REED_SOLOMON_DEC16
@@ -242,7 +144,7 @@ config MTD_NAND_CAFE
laptop.
config MTD_NAND_CS553X
- tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
+ tristate "CS5535/CS5536 (AMD Geode companion) NAND controller"
depends on X86_32
depends on !UML && HAS_IOMEM
help
@@ -256,7 +158,7 @@ config MTD_NAND_CS553X
If you say "m", the module will be called cs553x_nand.
config MTD_NAND_ATMEL
- tristate "Support for NAND Flash / SmartMedia on AT91"
+ tristate "Atmel AT91 NAND Flash/SmartMedia NAND controller"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
select GENERIC_ALLOCATOR
@@ -265,8 +167,17 @@ config MTD_NAND_ATMEL
Enables support for NAND Flash / Smart Media Card interface
on Atmel AT91 processors.
+config MTD_NAND_ORION
+ tristate "Marvell Orion NAND controller"
+ depends on PLAT_ORION
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board-specific support is done by this driver; each board
+ must advertise a platform_device for the driver to attach.
+
config MTD_NAND_MARVELL
- tristate "NAND controller support on Marvell boards"
+ tristate "Marvell EBU NAND controller"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
COMPILE_TEST
depends on HAS_IOMEM
@@ -278,7 +189,7 @@ config MTD_NAND_MARVELL
- 64-bit Aramda platforms (7k, 8k) (NFCv2)
config MTD_NAND_SLC_LPC32XX
- tristate "NXP LPC32xx SLC Controller"
+ tristate "NXP LPC32xx SLC NAND controller"
depends on ARCH_LPC32XX || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -290,7 +201,7 @@ config MTD_NAND_SLC_LPC32XX
by the SLC NAND controller.
config MTD_NAND_MLC_LPC32XX
- tristate "NXP LPC32xx MLC Controller"
+ tristate "NXP LPC32xx MLC NAND controller"
depends on ARCH_LPC32XX || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -302,38 +213,23 @@ config MTD_NAND_MLC_LPC32XX
by the MLC NAND controller.
config MTD_NAND_CM_X270
- tristate "Support for NAND Flash on CM-X270 modules"
+ tristate "CM-X270 modules NAND controller"
depends on MACH_ARMCORE
config MTD_NAND_PASEMI
- tristate "NAND support for PA Semi PWRficient"
+ tristate "PA Semi PWRficient NAND controller"
depends on PPC_PASEMI
help
Enables support for NAND Flash interface on PA Semi PWRficient
based boards
config MTD_NAND_TMIO
- tristate "NAND Flash device on Toshiba Mobile IO Controller"
+ tristate "Toshiba Mobile IO NAND controller"
depends on MFD_TMIO
help
Support for NAND flash connected to a Toshiba Mobile IO
Controller in some PDAs, including the Sharp SL6000x.
-config MTD_NAND_NANDSIM
- tristate "Support for NAND Flash Simulator"
- help
- The simulator may simulate various NAND flash chips for the
- MTD nand layer.
-
-config MTD_NAND_GPMI_NAND
- tristate "GPMI NAND Flash Controller driver"
- depends on MXS_DMA
- help
- Enables NAND Flash support for IMX23, IMX28 or IMX6.
- The GPMI controller is very powerful, with the help of BCH
- module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time.
-
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
depends on ARM || ARM64 || MIPS || COMPILE_TEST
@@ -344,7 +240,7 @@ config MTD_NAND_BRCMNAND
BCM3xxx, BCM63xxx, iProc/Cygnus and more.
config MTD_NAND_BCM47XXNFLASH
- tristate "Support for NAND flash on BCM4706 BCMA bus"
+ tristate "BCM4706 BCMA NAND controller"
depends on BCMA_NFLASH
depends on BCMA
help
@@ -352,32 +248,31 @@ config MTD_NAND_BCM47XXNFLASH
registered by bcma as platform devices. This enables driver for
NAND flash memories. For now only BCM4706 is supported.
-config MTD_NAND_PLATFORM
- tristate "Support for generic platform NAND driver"
+config MTD_NAND_OXNAS
+ tristate "Oxford Semiconductor NAND controller"
+ depends on ARCH_OXNAS || COMPILE_TEST
depends on HAS_IOMEM
help
- This implements a generic NAND driver for on-SOC platform
- devices. You will need to provide platform-specific functions
- via platform_data.
+ This enables the NAND flash controller on Oxford Semiconductor SoCs.
-config MTD_NAND_ORION
- tristate "NAND Flash support for Marvell Orion SoC"
- depends on PLAT_ORION
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 NAND controller"
+ depends on PPC_MPC512x
help
- This enables the NAND flash controller on Orion machines.
-
- No board specific support is done by this driver, each board
- must advertise a platform_device for the driver to attach.
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
-config MTD_NAND_OXNAS
- tristate "NAND Flash support for Oxford Semiconductor SoC"
- depends on ARCH_OXNAS || COMPILE_TEST
- depends on HAS_IOMEM
+config MTD_NAND_GPMI_NAND
+ tristate "Freescale GPMI NAND controller"
+ depends on MXS_DMA
help
- This enables the NAND flash controller on Oxford Semiconductor SoCs.
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
+ The GPMI controller is very powerful, with the help of BCH
+ module, it can do the hardware ECC. The GPMI supports several
+ NAND flashes at the same time.
config MTD_NAND_FSL_ELBC
- tristate "NAND support for Freescale eLBC controllers"
+ tristate "Freescale eLBC NAND controller"
depends on FSL_SOC
select FSL_LBC
help
@@ -387,7 +282,7 @@ config MTD_NAND_FSL_ELBC
external NAND devices.
config MTD_NAND_FSL_IFC
- tristate "NAND support for Freescale IFC controller"
+ tristate "Freescale IFC NAND controller"
depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
depends on HAS_IOMEM
select FSL_IFC
@@ -399,22 +294,15 @@ config MTD_NAND_FSL_IFC
external NAND devices.
config MTD_NAND_FSL_UPM
- tristate "Support for NAND on Freescale UPM"
+ tristate "Freescale UPM NAND controller"
depends on PPC_83xx || PPC_85xx
select FSL_LBC
help
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
-config MTD_NAND_MPC5121_NFC
- tristate "MPC5121 built-in NAND Flash Controller support"
- depends on PPC_MPC512x
- help
- This enables the driver for the NAND flash controller on the
- MPC5121 SoC.
-
config MTD_NAND_VF610_NFC
- tristate "Support for Freescale NFC for VF610/MPC5125"
+ tristate "Freescale VF610/MPC5125 NAND controller"
depends on (SOC_VF610 || COMPILE_TEST)
depends on HAS_IOMEM
help
@@ -426,7 +314,7 @@ config MTD_NAND_VF610_NFC
device tree.
config MTD_NAND_MXC
- tristate "MXC NAND support"
+ tristate "Freescale MXC NAND controller"
depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -434,7 +322,7 @@ config MTD_NAND_MXC
MXC processors.
config MTD_NAND_SH_FLCTL
- tristate "Support for NAND on Renesas SuperH FLCTL"
+ tristate "Renesas SuperH FLCTL NAND controller"
depends on SUPERH || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -442,7 +330,7 @@ config MTD_NAND_SH_FLCTL
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci/Keystone SoC"
+ tristate "DaVinci/Keystone NAND controller"
depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -450,42 +338,30 @@ config MTD_NAND_DAVINCI
DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
- tristate "NAND Flash support for TXx9 SoC"
+ tristate "TXx9 NAND controller"
depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
config MTD_NAND_SOCRATES
- tristate "Support for NAND on Socrates board"
+ tristate "Socrates NAND controller"
depends on SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
config MTD_NAND_NUC900
- tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
+ tristate "Nuvoton NUC9xx/w90p910 NAND controller"
depends on ARCH_W90X900 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
-config MTD_NAND_JZ4740
- tristate "Support for JZ4740 SoC NAND controller"
- depends on MACH_JZ4740 || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Enables support for NAND Flash on JZ4740 SoC based boards.
-
-config MTD_NAND_JZ4780
- tristate "Support for NAND on JZ4780 SoC"
- depends on JZ4780_NEMC
- help
- Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
- based boards, using the BCH controller for hardware error correction.
+source "drivers/mtd/nand/raw/ingenic/Kconfig"
config MTD_NAND_FSMC
- tristate "Support for NAND on ST Micros FSMC"
+ tristate "ST Micros FSMC NAND controller"
depends on OF && HAS_IOMEM
depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 || \
COMPILE_TEST
@@ -494,28 +370,28 @@ config MTD_NAND_FSMC
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
- bool "Support for NAND on Lantiq XWAY SoC"
+ bool "Lantiq XWAY NAND controller"
depends on LANTIQ && SOC_TYPE_XWAY
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
to the External Bus Unit (EBU).
config MTD_NAND_SUNXI
- tristate "Support for NAND on Allwinner SoCs"
+ tristate "Allwinner NAND controller"
depends on ARCH_SUNXI || COMPILE_TEST
depends on HAS_IOMEM
help
Enables support for NAND Flash chips on Allwinner SoCs.
config MTD_NAND_HISI504
- tristate "Support for NAND controller on Hisilicon SoC Hip04"
+ tristate "Hisilicon Hip04 NAND controller"
depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
help
Enables support for NAND controller on Hisilicon SoC Hip04.
config MTD_NAND_QCOM
- tristate "Support for NAND on QCOM SoCs"
+ tristate "QCOM NAND controller"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -523,7 +399,7 @@ config MTD_NAND_QCOM
controller. This controller is found on IPQ806x SoC.
config MTD_NAND_MTK
- tristate "Support for NAND controller on MTK SoCs"
+ tristate "MTK NAND controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -531,7 +407,7 @@ config MTD_NAND_MTK
This controller is found on mt27xx, mt81xx, mt65xx SoCs.
config MTD_NAND_TEGRA
- tristate "Support for NAND controller on NVIDIA Tegra"
+ tristate "NVIDIA Tegra NAND controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -558,4 +434,115 @@ config MTD_NAND_MESON
Enables support for NAND controller on Amlogic's Meson SoCs.
This controller is found on Meson SoCs.
-endif # MTD_NAND
+config MTD_NAND_GPIO
+ tristate "GPIO assisted NAND controller"
+ depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
+
+config MTD_NAND_PLATFORM
+ tristate "Generic NAND controller"
+ depends on HAS_IOMEM
+ help
+ This implements a generic NAND driver for on-SOC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+comment "Misc"
+
+config MTD_SM_COMMON
+ tristate
+ default n
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ help
+ The simulator may simulate various NAND flash chips for the
+ MTD NAND layer.
+
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the
+ 'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+ experimental, read-write
+ 'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_DISKONCHIP
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+ depends on HAS_IOMEM
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ This is a reimplementation of M-Systems DiskOnChip 2000,
+ Millennium and Millennium Plus as a standard NAND device driver,
+ as opposed to the earlier self-contained MTD device drivers.
+ This should enable, among other things, proper JFFS2 operation on
+ these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ This option allows you to specify nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes to make it probe between 0xFFFC8000 and
+ 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+ bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+ and 2000 TSOP/Alon), Linux reserves some space at the end of the
+ device for the Bad Block Table (BBT). If you have existing INFTL
+ data on your device (created by non-Linux tools such as M-Systems'
+ DOS drivers), your data might overlap the area Linux wants to use for
+ the BBT. If this is a concern for you, leave this option disabled and
+ Linux will not write BBT data into this area.
+ The downside of leaving this option disabled is that if bad blocks
+ are detected by Linux, they will not be recorded in the BBT, which
+ could cause future problems.
+ Once you enable this option, new filesystems (INFTL or others, created
+ in Linux or other operating systems) will not use the reserved area.
+ The only reason not to enable this option is to prevent damage to
+ preexisting filesystems.
+ Even if you leave this disabled, you can enable BBT writes at module
+ load time (assuming you build diskonchip as a module) with the module
+ parameter "inftl_bbt_write=1".
+
+endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 5a5a72f0793e..efaf5cd25edc 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTD_NAND) += nand.o
-obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
-obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o
+obj-$(CONFIG_MTD_RAW_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += nand_ecc.o
+nand-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
@@ -45,8 +45,7 @@ obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
-obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
-obj-$(CONFIG_MTD_NAND_JZ4780) += jz4780_nand.o jz4780_bch.o
+obj-y += ingenic/
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 5781fcf6b76c..8d6be90a6fe8 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 ATMEL
* Copyright 2017 Free Electrons
@@ -29,10 +30,6 @@
* Add Nand Flash Controller support for SAMA5 SoC
* Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* A few words about the naming convention in this file. This convention
* applies to structure and function names.
*
@@ -65,6 +62,7 @@
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
#include "pmecc.h"
@@ -211,6 +209,7 @@ struct atmel_nand_controller_caps {
bool legacy_of_bindings;
u32 ale_offs;
u32 cle_offs;
+ const char *ebi_csa_regmap_name;
const struct atmel_nand_controller_ops *ops;
};
@@ -231,10 +230,15 @@ to_nand_controller(struct nand_controller *ctl)
return container_of(ctl, struct atmel_nand_controller, base);
}
+struct atmel_smc_nand_ebi_csa_cfg {
+ u32 offs;
+ u32 nfd0_on_d16;
+};
+
struct atmel_smc_nand_controller {
struct atmel_nand_controller base;
- struct regmap *matrix;
- unsigned int ebi_csa_offs;
+ struct regmap *ebi_csa_regmap;
+ struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
};
static inline struct atmel_smc_nand_controller *
@@ -1068,15 +1072,15 @@ static int atmel_nand_pmecc_init(struct nand_chip *chip)
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
else if (chip->ecc.strength)
req.ecc.strength = chip->ecc.strength;
- else if (chip->ecc_strength_ds)
- req.ecc.strength = chip->ecc_strength_ds;
+ else if (chip->base.eccreq.strength)
+ req.ecc.strength = chip->base.eccreq.strength;
else
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
if (chip->ecc.size)
req.ecc.sectorsize = chip->ecc.size;
- else if (chip->ecc_step_ds)
- req.ecc.sectorsize = chip->ecc_step_ds;
+ else if (chip->base.eccreq.step_size)
+ req.ecc.sectorsize = chip->base.eccreq.step_size;
else
req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
@@ -1507,13 +1511,20 @@ static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
atmel_nand_init(nc, nand);
smc_nc = to_smc_nand_controller(chip->controller);
- if (!smc_nc->matrix)
+ if (!smc_nc->ebi_csa_regmap)
return;
/* Attach the CS to the NAND Flash logic. */
for (i = 0; i < nand->numcs; i++)
- regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs,
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
BIT(nand->cs[i].id), BIT(nand->cs[i].id));
+
+ if (smc_nc->ebi_csa->nfd0_on_d16)
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
+ smc_nc->ebi_csa->nfd0_on_d16,
+ smc_nc->ebi_csa->nfd0_on_d16);
}
static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
@@ -1797,7 +1808,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
ret = of_property_read_u32(np, "#size-cells", &val);
if (ret) {
- dev_err(dev, "missing #address-cells property\n");
+ dev_err(dev, "missing #size-cells property\n");
return ret;
}
@@ -1833,34 +1844,71 @@ static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
clk_put(nc->mck);
}
-static const struct of_device_id atmel_matrix_of_ids[] = {
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
+ .offs = AT91SAM9260_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
+ .offs = AT91SAM9261_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
+ .offs = AT91SAM9263_MATRIX_EBI0CSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
+ .offs = AT91SAM9RL_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
+ .offs = AT91SAM9G45_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
+ .offs = AT91SAM9N12_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
+ .offs = AT91SAM9X5_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
+ .offs = AT91_SFR_CCFG_EBICSA,
+ .nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
+};
+
+static const struct of_device_id atmel_ebi_csa_regmap_of_ids[] = {
{
.compatible = "atmel,at91sam9260-matrix",
- .data = (void *)AT91SAM9260_MATRIX_EBICSA,
+ .data = &at91sam9260_ebi_csa,
},
{
.compatible = "atmel,at91sam9261-matrix",
- .data = (void *)AT91SAM9261_MATRIX_EBICSA,
+ .data = &at91sam9261_ebi_csa,
},
{
.compatible = "atmel,at91sam9263-matrix",
- .data = (void *)AT91SAM9263_MATRIX_EBI0CSA,
+ .data = &at91sam9263_ebi_csa,
},
{
.compatible = "atmel,at91sam9rl-matrix",
- .data = (void *)AT91SAM9RL_MATRIX_EBICSA,
+ .data = &at91sam9rl_ebi_csa,
},
{
.compatible = "atmel,at91sam9g45-matrix",
- .data = (void *)AT91SAM9G45_MATRIX_EBICSA,
+ .data = &at91sam9g45_ebi_csa,
},
{
.compatible = "atmel,at91sam9n12-matrix",
- .data = (void *)AT91SAM9N12_MATRIX_EBICSA,
+ .data = &at91sam9n12_ebi_csa,
},
{
.compatible = "atmel,at91sam9x5-matrix",
- .data = (void *)AT91SAM9X5_MATRIX_EBICSA,
+ .data = &at91sam9x5_ebi_csa,
+ },
+ {
+ .compatible = "microchip,sam9x60-sfr",
+ .data = &sam9x60_ebi_csa,
},
{ /* sentinel */ },
};
@@ -1982,37 +2030,38 @@ atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
struct device_node *np;
int ret;
- /* We do not retrieve the matrix syscon when parsing old DTs. */
+ /* We do not retrieve the EBICSA regmap when parsing old DTs. */
if (nc->base.caps->legacy_of_bindings)
return 0;
- np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0);
+ np = of_parse_phandle(dev->parent->of_node,
+ nc->base.caps->ebi_csa_regmap_name, 0);
if (!np)
return 0;
- match = of_match_node(atmel_matrix_of_ids, np);
+ match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
if (!match) {
of_node_put(np);
return 0;
}
- nc->matrix = syscon_node_to_regmap(np);
+ nc->ebi_csa_regmap = syscon_node_to_regmap(np);
of_node_put(np);
- if (IS_ERR(nc->matrix)) {
- ret = PTR_ERR(nc->matrix);
- dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret);
+ if (IS_ERR(nc->ebi_csa_regmap)) {
+ ret = PTR_ERR(nc->ebi_csa_regmap);
+ dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
return ret;
}
- nc->ebi_csa_offs = (uintptr_t)match->data;
+ nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
/*
* The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
- * add 4 to ->ebi_csa_offs.
+ * add 4 to ->ebi_csa->offs.
*/
if (of_device_is_compatible(dev->parent->of_node,
"atmel,at91sam9263-ebi1"))
- nc->ebi_csa_offs += 4;
+ nc->ebi_csa->offs += 4;
return 0;
}
@@ -2341,6 +2390,7 @@ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
.ops = &at91rm9200_nc_ops,
};
@@ -2355,12 +2405,14 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
.ale_offs = BIT(22),
.cle_offs = BIT(21),
+ .ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
@@ -2368,6 +2420,15 @@ static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "microchip,sfr",
.ops = &atmel_smc_nc_ops,
};
@@ -2415,6 +2476,10 @@ static const struct of_device_id atmel_nand_controller_of_ids[] = {
.compatible = "atmel,sama5d3-nand-controller",
.data = &atmel_sama5_nc_caps,
},
+ {
+ .compatible = "microchip,sam9x60-nand-controller",
+ .data = &microchip_sam9x60_nc_caps,
+ },
/* Support for old/deprecated bindings: */
{
.compatible = "atmel,at91rm9200-nand",
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 9d3997840889..cbb023bf00f7 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 ATMEL
* Copyright 2017 Free Electrons
@@ -28,10 +29,6 @@
* Add Nand Flash Controller support for SAMA5 SoC
* Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 * The PMECC is a hardware-assisted BCH engine, which means part of the
 * ECC algorithm is left to the software. The hardware/software split
* is explained in the "PMECC Controller Functional Description" chapter in
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h
index 808f1be0d6ad..7851c05126cf 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.h
+++ b/drivers/mtd/nand/raw/atmel/pmecc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* © Copyright 2016 ATMEL
* © Copyright 2016 Free Electrons
@@ -28,11 +29,6 @@
*
* Add Nand Flash Controller support for SAMA5 SoC
* © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef ATMEL_PMECC_H
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index a37cbfe56567..a53ffb3d64b0 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -428,7 +428,7 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
}
/* Configure FLASH */
- chipsize = b47n->nand_chip.chipsize >> 20;
+ chipsize = nanddev_target_size(&b47n->nand_chip.base) >> 20;
tbits = ffs(chipsize); /* find first bit set */
if (!tbits || tbits != fls(chipsize)) {
pr_err("Invalid flash size: 0x%lX\n", chipsize);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 482c6f093f99..ce0b8ffc7812 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -1676,11 +1676,8 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int page = addr >> chip->page_shift;
int ret;
- if (!buf) {
- buf = chip->data_buf;
- /* Invalidate page cache */
- chip->pagebuf = -1;
- }
+ if (!buf)
+ buf = nand_get_data_buf(chip);
sas = mtd->oobsize / chip->ecc.steps;
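For context, the two removed lines above (grab chip->data_buf and set chip->pagebuf to -1) are exactly the sequence the new nand_get_data_buf() helper is meant to encapsulate. A rough, illustrative equivalent of that open-coded sequence, shown only to make the intent of the change explicit and not taken from the rawnand core, would be:

/* Illustrative only: approximates the open-coded sequence being replaced. */
static u8 *example_get_data_buf(struct nand_chip *chip)
{
	/*
	 * The cached page can no longer be trusted once the buffer is
	 * handed out for a raw transfer, so invalidate the page cache.
	 */
	chip->pagebuf = -1;

	return chip->data_buf;
}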
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 24aeafc67cd4..3102ddbd8abd 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -3,7 +3,7 @@
* NAND Flash Controller Device Driver
* Copyright © 2009-2010, Intel Corporation and its suppliers.
*
- * Copyright (c) 2017 Socionext Inc.
+ * Copyright (c) 2017-2019 Socionext Inc.
* Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
*/
@@ -40,11 +40,16 @@
#define DENALI_BANK(denali) ((denali)->active_bank << 24)
#define DENALI_INVALID_BANK -1
-#define DENALI_NR_BANKS 4
-static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
+static struct denali_chip *to_denali_chip(struct nand_chip *chip)
{
- return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
+ return container_of(chip, struct denali_chip, chip);
+}
+
+static struct denali_controller *to_denali_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct denali_controller,
+ controller);
}
/*
@@ -52,12 +57,12 @@ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
* type, bank, block, and page address). The slave data is the actual data to
* be transferred. This mode requires 28 bits of address region allocated.
*/
-static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
{
return ioread32(denali->host + addr);
}
-static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+static void denali_direct_write(struct denali_controller *denali, u32 addr,
u32 data)
{
iowrite32(data, denali->host + addr);
@@ -69,77 +74,62 @@ static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
* control information and transferred data are latched by the registers in
* the translation module.
*/
-static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
return ioread32(denali->host + DENALI_INDEXED_DATA);
}
-static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+static void denali_indexed_write(struct denali_controller *denali, u32 addr,
u32 data)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
-/*
- * Use the configuration feature register to determine the maximum number of
- * banks that the hardware supports.
- */
-static void denali_detect_max_banks(struct denali_nand_info *denali)
-{
- uint32_t features = ioread32(denali->reg + FEATURES);
-
- denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
-
- /* the encoding changed from rev 5.0 to 5.1 */
- if (denali->revision < 0x0501)
- denali->max_banks <<= 1;
-}
-
-static void denali_enable_irq(struct denali_nand_info *denali)
+static void denali_enable_irq(struct denali_controller *denali)
{
int i;
- for (i = 0; i < DENALI_NR_BANKS; i++)
+ for (i = 0; i < denali->nbanks; i++)
iowrite32(U32_MAX, denali->reg + INTR_EN(i));
iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}
-static void denali_disable_irq(struct denali_nand_info *denali)
+static void denali_disable_irq(struct denali_controller *denali)
{
int i;
- for (i = 0; i < DENALI_NR_BANKS; i++)
+ for (i = 0; i < denali->nbanks; i++)
iowrite32(0, denali->reg + INTR_EN(i));
iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}
-static void denali_clear_irq(struct denali_nand_info *denali,
- int bank, uint32_t irq_status)
+static void denali_clear_irq(struct denali_controller *denali,
+ int bank, u32 irq_status)
{
/* write one to clear bits */
iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
-static void denali_clear_irq_all(struct denali_nand_info *denali)
+static void denali_clear_irq_all(struct denali_controller *denali)
{
int i;
- for (i = 0; i < DENALI_NR_BANKS; i++)
+ for (i = 0; i < denali->nbanks; i++)
denali_clear_irq(denali, i, U32_MAX);
}
static irqreturn_t denali_isr(int irq, void *dev_id)
{
- struct denali_nand_info *denali = dev_id;
+ struct denali_controller *denali = dev_id;
irqreturn_t ret = IRQ_NONE;
- uint32_t irq_status;
+ u32 irq_status;
int i;
spin_lock(&denali->irq_lock);
- for (i = 0; i < DENALI_NR_BANKS; i++) {
+ for (i = 0; i < denali->nbanks; i++) {
irq_status = ioread32(denali->reg + INTR_STATUS(i));
if (irq_status)
ret = IRQ_HANDLED;
@@ -160,7 +150,7 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
return ret;
}
-static void denali_reset_irq(struct denali_nand_info *denali)
+static void denali_reset_irq(struct denali_controller *denali)
{
unsigned long flags;
@@ -170,11 +160,10 @@ static void denali_reset_irq(struct denali_nand_info *denali)
spin_unlock_irqrestore(&denali->irq_lock, flags);
}
-static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
- uint32_t irq_mask)
+static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
{
unsigned long time_left, flags;
- uint32_t irq_status;
+ u32 irq_status;
spin_lock_irqsave(&denali->irq_lock, flags);
@@ -201,128 +190,259 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
return denali->irq_status;
}
-static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+static void denali_select_target(struct nand_chip *chip, int cs)
{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- int i;
- for (i = 0; i < len; i++)
- buf[i] = denali->host_read(denali, addr);
+ denali->active_bank = sel->bank;
+
+ iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
+ denali->reg + PAGES_PER_BLOCK);
+ iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
+ denali->reg + DEVICE_WIDTH);
+ iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ?
+ 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
+ denali->reg + ECC_CORRECTION);
+ iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return;
+
+ /* update timing registers unless NAND_KEEP_TIMINGS is set */
+ iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
+ iowrite32(sel->tcwaw_and_addr_2_data,
+ denali->reg + TCWAW_AND_ADDR_2_DATA);
+ iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
+ iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
+ iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
+ iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
+ iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
+ iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
}
-static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
+static int denali_change_column(struct nand_chip *chip, unsigned int offset,
+ void *buf, unsigned int len, bool write)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- int i;
-
- for (i = 0; i < len; i++)
- denali->host_write(denali, addr, buf[i]);
+ if (write)
+ return nand_change_write_column_op(chip, offset, buf, len,
+ false);
+ else
+ return nand_change_read_column_op(chip, offset, buf, len,
+ false);
}
-static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- uint16_t *buf16 = (uint16_t *)buf;
- int i;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = i * (ecc->size + ecc->bytes);
+ len = ecc->size;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+			/* This chunk overlaps the BBM area. It must be split */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
- for (i = 0; i < len / 2; i++)
- buf16[i] = denali->host_read(denali, addr);
+ buf += len;
+ }
+
+ return 0;
}
-static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf,
- int len)
+static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
- const uint16_t *buf16 = (const uint16_t *)buf;
- int i;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
- for (i = 0; i < len / 2; i++)
- denali->host_write(denali, addr, buf16[i]);
+ /* BBM at the beginning of the OOB area */
+ ret = denali_change_column(chip, writesize, buf, oob_skip, write);
+ if (ret)
+ return ret;
+
+ buf += oob_skip;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = ecc->size + i * (ecc->size + ecc->bytes);
+
+ if (i == ecc->steps - 1)
+ /* The last chunk includes OOB free */
+ len = writesize + oobsize - pos - oob_skip;
+ else
+ len = ecc->bytes;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+			/* This chunk overlaps the BBM area. It must be split */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
+
+ buf += len;
+ }
+
+ return 0;
}
-static uint8_t denali_read_byte(struct nand_chip *chip)
+static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
+ int page)
{
- uint8_t byte;
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
- denali_read_buf(chip, &byte, 1);
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- return byte;
+ if (buf) {
+ ret = denali_payload_xfer(chip, buf, false);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, oob_buf, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-static void denali_write_byte(struct nand_chip *chip, uint8_t byte)
+static int denali_write_raw(struct nand_chip *chip, const void *buf,
+ const void *oob_buf, int page)
{
- denali_write_buf(chip, &byte, 1);
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (buf) {
+ ret = denali_payload_xfer(chip, (void *)buf, true);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, (void *)oob_buf, true);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
-static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
+static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- uint32_t type;
+ return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
- if (ctrl & NAND_CLE)
- type = DENALI_MAP11_CMD;
- else if (ctrl & NAND_ALE)
- type = DENALI_MAP11_ADDR;
- else
- return;
+static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
- /*
- * Some commands are followed by chip->legacy.waitfunc.
- * irq_status must be cleared here to catch the R/B# interrupt later.
- */
- if (ctrl & NAND_CTRL_CHANGE)
- denali_reset_irq(denali);
+static int denali_read_oob(struct nand_chip *chip, int page)
+{
+ return denali_read_raw(chip, NULL, chip->oob_poi, page);
+}
- denali->host_write(denali, DENALI_BANK(denali) | type, dat);
+static int denali_write_oob(struct nand_chip *chip, int page)
+{
+ return denali_write_raw(chip, NULL, chip->oob_poi, page);
}
-static int denali_check_erased_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int i, stat;
- for (i = 0; i < ecc_steps; i++) {
+ for (i = 0; i < ecc->steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
continue;
- stat = nand_check_erased_ecc_chunk(buf, ecc_size,
- ecc_code, ecc_bytes,
- NULL, 0,
- chip->ecc.strength);
+ stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
+ ecc->bytes, NULL, 0,
+ ecc->strength);
if (stat < 0) {
- mtd->ecc_stats.failed++;
+ ecc_stats->failed++;
} else {
- mtd->ecc_stats.corrected += stat;
+ ecc_stats->corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
- buf += ecc_size;
- ecc_code += ecc_bytes;
+ buf += ecc->size;
+ ecc_code += ecc->bytes;
}
return max_bitflips;
}
-static int denali_hw_ecc_fixup(struct mtd_info *mtd,
- struct denali_nand_info *denali,
+static int denali_hw_ecc_fixup(struct nand_chip *chip,
unsigned long *uncor_ecc_flags)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
int bank = denali->active_bank;
- uint32_t ecc_cor;
+ u32 ecc_cor;
unsigned int max_bitflips;
ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
@@ -346,23 +466,24 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
	 * Unfortunately, we cannot know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips (a compromise).
*/
- mtd->ecc_stats.corrected += max_bitflips;
+ ecc_stats->corrected += max_bitflips;
return max_bitflips;
}
-static int denali_sw_ecc_fixup(struct mtd_info *mtd,
- struct denali_nand_info *denali,
- unsigned long *uncor_ecc_flags, uint8_t *buf)
+static int denali_sw_ecc_fixup(struct nand_chip *chip,
+ unsigned long *uncor_ecc_flags, u8 *buf)
{
- unsigned int ecc_size = denali->nand.ecc.size;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ unsigned int ecc_size = chip->ecc.size;
unsigned int bitflips = 0;
unsigned int max_bitflips = 0;
- uint32_t err_addr, err_cor_info;
+ u32 err_addr, err_cor_info;
unsigned int err_byte, err_sector, err_device;
- uint8_t err_cor_value;
+ u8 err_cor_value;
unsigned int prev_sector = 0;
- uint32_t irq_status;
+ u32 irq_status;
denali_reset_irq(denali);
@@ -404,7 +525,7 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
/* correct the ECC error */
flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
buf[offset] ^= err_cor_value;
- mtd->ecc_stats.corrected += flips_in_byte;
+ ecc_stats->corrected += flips_in_byte;
bitflips += flips_in_byte;
max_bitflips = max(max_bitflips, bitflips);
@@ -424,10 +545,10 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-static void denali_setup_dma64(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
+static void denali_setup_dma64(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
{
- uint32_t mode;
+ u32 mode;
const int page_count = 1;
mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
@@ -439,7 +560,8 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* burst len = 64 bytes, the number of pages
*/
denali->host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ 0x01002000 | (64 << 16) |
+ (write ? BIT(8) : 0) | page_count);
/* 2. set memory low address */
denali->host_write(denali, mode, lower_32_bits(dma_addr));
@@ -448,10 +570,10 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
-static void denali_setup_dma32(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
+static void denali_setup_dma32(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
{
- uint32_t mode;
+ u32 mode;
const int page_count = 1;
mode = DENALI_MAP10 | DENALI_BANK(denali);
@@ -460,7 +582,7 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* 1. setup transfer type and # of pages */
denali->host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ 0x2000 | (write ? BIT(8) : 0) | page_count);
/* 2. set memory high address bits 23:8 */
denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
@@ -472,12 +594,11 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
denali->host_write(denali, mode | 0x14000, 0x2400);
}
-static int denali_pio_read(struct denali_nand_info *denali, void *buf,
+static int denali_pio_read(struct denali_controller *denali, u32 *buf,
size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
- uint32_t *buf32 = (uint32_t *)buf;
- uint32_t irq_status, ecc_err_mask;
+ u32 irq_status, ecc_err_mask;
int i;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
@@ -488,7 +609,7 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
for (i = 0; i < size / 4; i++)
- *buf32++ = denali->host_read(denali, addr);
+ buf[i] = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -500,29 +621,29 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
-static int denali_pio_write(struct denali_nand_info *denali,
- const void *buf, size_t size, int page)
+static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
+ size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
- const uint32_t *buf32 = (uint32_t *)buf;
- uint32_t irq_status;
+ u32 irq_status;
int i;
denali_reset_irq(denali);
for (i = 0; i < size / 4; i++)
- denali->host_write(denali, addr, *buf32++);
+ denali->host_write(denali, addr, buf[i]);
irq_status = denali_wait_for_irq(denali,
- INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
+ INTR__PROGRAM_COMP |
+ INTR__PROGRAM_FAIL);
if (!(irq_status & INTR__PROGRAM_COMP))
return -EIO;
return 0;
}
-static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int write)
+static int denali_pio_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
{
if (write)
return denali_pio_write(denali, buf, size, page);
@@ -530,11 +651,11 @@ static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
return denali_pio_read(denali, buf, size, page);
}
-static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int write)
+static int denali_dma_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
{
dma_addr_t dma_addr;
- uint32_t irq_mask, irq_status, ecc_err_mask;
+ u32 irq_mask, irq_status, ecc_err_mask;
enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
int ret = 0;
@@ -587,12 +708,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
return ret;
}
-static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw, int write)
+static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
+ int page, bool write)
{
- iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
- iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
- denali->reg + TRANSFER_SPARE_REG);
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ denali_select_target(chip, chip->cur_cs);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, write);
@@ -600,180 +721,23 @@ static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
return denali_pio_xfer(denali, buf, size, page, write);
}
-static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int write)
-{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- int writesize = mtd->writesize;
- int oobsize = mtd->oobsize;
- uint8_t *bufpoi = chip->oob_poi;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- int oob_skip = denali->oob_skip_bytes;
- size_t size = writesize + oobsize;
- int i, pos, len;
-
- /* BBM at the beginning of the OOB area */
- if (write)
- nand_prog_page_begin_op(chip, page, writesize, bufpoi,
- oob_skip);
- else
- nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
- bufpoi += oob_skip;
-
- /* OOB ECC */
- for (i = 0; i < ecc_steps; i++) {
- pos = ecc_size + i * (ecc_size + ecc_bytes);
- len = ecc_bytes;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- if (write)
- nand_change_write_column_op(chip, pos, bufpoi, len,
- false);
- else
- nand_change_read_column_op(chip, pos, bufpoi, len,
- false);
- bufpoi += len;
- if (len < ecc_bytes) {
- len = ecc_bytes - len;
- if (write)
- nand_change_write_column_op(chip, writesize +
- oob_skip, bufpoi,
- len, false);
- else
- nand_change_read_column_op(chip, writesize +
- oob_skip, bufpoi,
- len, false);
- bufpoi += len;
- }
- }
-
- /* OOB free */
- len = oobsize - (bufpoi - chip->oob_poi);
- if (write)
- nand_change_write_column_op(chip, size - len, bufpoi, len,
- false);
- else
- nand_change_read_column_op(chip, size - len, bufpoi, len,
- false);
-}
-
-static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- int writesize = mtd->writesize;
- int oobsize = mtd->oobsize;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- void *tmp_buf = denali->buf;
- int oob_skip = denali->oob_skip_bytes;
- size_t size = writesize + oobsize;
- int ret, i, pos, len;
-
- ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
- if (ret)
- return ret;
-
- /* Arrange the buffer for syndrome payload/ecc layout */
- if (buf) {
- for (i = 0; i < ecc_steps; i++) {
- pos = i * (ecc_size + ecc_bytes);
- len = ecc_size;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(buf, tmp_buf + pos, len);
- buf += len;
- if (len < ecc_size) {
- len = ecc_size - len;
- memcpy(buf, tmp_buf + writesize + oob_skip,
- len);
- buf += len;
- }
- }
- }
-
- if (oob_required) {
- uint8_t *oob = chip->oob_poi;
-
- /* BBM at the beginning of the OOB area */
- memcpy(oob, tmp_buf + writesize, oob_skip);
- oob += oob_skip;
-
- /* OOB ECC */
- for (i = 0; i < ecc_steps; i++) {
- pos = ecc_size + i * (ecc_size + ecc_bytes);
- len = ecc_bytes;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(oob, tmp_buf + pos, len);
- oob += len;
- if (len < ecc_bytes) {
- len = ecc_bytes - len;
- memcpy(oob, tmp_buf + writesize + oob_skip,
- len);
- oob += len;
- }
- }
-
- /* OOB free */
- len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, tmp_buf + size - len, len);
- }
-
- return 0;
-}
-
-static int denali_read_oob(struct nand_chip *chip, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- denali_oob_xfer(mtd, chip, page, 0);
-
- return 0;
-}
-
-static int denali_write_oob(struct nand_chip *chip, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- denali_oob_xfer(mtd, chip, page, 1);
-
- return nand_prog_page_end_op(chip);
-}
-
-static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
+static int denali_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
unsigned long uncor_ecc_flags = 0;
int stat = 0;
int ret;
- ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
+ ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
if (ret && ret != -EBADMSG)
return ret;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
- stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
+ stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
else if (ret == -EBADMSG)
- stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
+ stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);
if (stat < 0)
return stat;
@@ -783,130 +747,32 @@ static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
if (ret)
return ret;
- stat = denali_check_erased_page(mtd, chip, buf,
+ stat = denali_check_erased_page(chip, buf,
uncor_ecc_flags, stat);
}
return stat;
}
-static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- int writesize = mtd->writesize;
- int oobsize = mtd->oobsize;
- int ecc_steps = chip->ecc.steps;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- void *tmp_buf = denali->buf;
- int oob_skip = denali->oob_skip_bytes;
- size_t size = writesize + oobsize;
- int i, pos, len;
-
- /*
- * Fill the buffer with 0xff first except the full page transfer.
- * This simplifies the logic.
- */
- if (!buf || !oob_required)
- memset(tmp_buf, 0xff, size);
-
- /* Arrange the buffer for syndrome payload/ecc layout */
- if (buf) {
- for (i = 0; i < ecc_steps; i++) {
- pos = i * (ecc_size + ecc_bytes);
- len = ecc_size;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(tmp_buf + pos, buf, len);
- buf += len;
- if (len < ecc_size) {
- len = ecc_size - len;
- memcpy(tmp_buf + writesize + oob_skip, buf,
- len);
- buf += len;
- }
- }
- }
-
- if (oob_required) {
- const uint8_t *oob = chip->oob_poi;
-
- /* BBM at the beginning of the OOB area */
- memcpy(tmp_buf + writesize, oob, oob_skip);
- oob += oob_skip;
-
- /* OOB ECC */
- for (i = 0; i < ecc_steps; i++) {
- pos = ecc_size + i * (ecc_size + ecc_bytes);
- len = ecc_bytes;
-
- if (pos >= writesize)
- pos += oob_skip;
- else if (pos + len > writesize)
- len = writesize - pos;
-
- memcpy(tmp_buf + pos, oob, len);
- oob += len;
- if (len < ecc_bytes) {
- len = ecc_bytes - len;
- memcpy(tmp_buf + writesize + oob_skip, oob,
- len);
- oob += len;
- }
- }
-
- /* OOB free */
- len = oobsize - (oob - chip->oob_poi);
- memcpy(tmp_buf + size - len, oob, len);
- }
-
- return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
-}
-
-static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
+static int denali_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
- return denali_data_xfer(denali, (void *)buf, mtd->writesize,
- page, 0, 1);
-}
-
-static void denali_select_chip(struct nand_chip *chip, int cs)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
-
- denali->active_bank = cs;
-}
-
-static int denali_waitfunc(struct nand_chip *chip)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- uint32_t irq_status;
-
- /* R/B# pin transitioned from low to high? */
- irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
-
- return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
+ return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
}
static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel;
const struct nand_sdr_timings *timings;
unsigned long t_x, mult_x;
int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
int addr_2_data_mask;
- uint32_t tmp;
+ u32 tmp;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
@@ -929,6 +795,8 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
+ sel = &to_denali_chip(chip)->sels[chipnr];
+
/* tREA -> ACC_CLKS */
acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
@@ -936,7 +804,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
- iowrite32(tmp, denali->reg + ACC_CLKS);
+ sel->acc_clks = tmp;
/* tRWH -> RE_2_WE */
re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
@@ -945,7 +813,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
- iowrite32(tmp, denali->reg + RE_2_WE);
+ sel->re_2_we = tmp;
/* tRHZ -> RE_2_RE */
re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
@@ -954,7 +822,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
- iowrite32(tmp, denali->reg + RE_2_RE);
+ sel->re_2_re = tmp;
/*
* tCCS, tWHR -> WE_2_RE
@@ -968,7 +836,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
- iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
+ sel->hwhr2_and_we_2_re = tmp;
/* tADL -> ADDR_2_DATA */
@@ -983,7 +851,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
- iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
+ sel->tcwaw_and_addr_2_data = tmp;
/* tREH, tWH -> RDWR_EN_HI_CNT */
rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
@@ -993,7 +861,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
- iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
+ sel->rdwr_en_hi_cnt = tmp;
/* tRP, tWP -> RDWR_EN_LO_CNT */
rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
@@ -1006,7 +874,7 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
- iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
+ sel->rdwr_en_lo_cnt = tmp;
/* tCS, tCEA -> CS_SETUP_CNT */
cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
@@ -1017,39 +885,11 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
- iowrite32(tmp, denali->reg + CS_SETUP_CNT);
+ sel->cs_setup_cnt = tmp;
return 0;
}
-static void denali_hw_init(struct denali_nand_info *denali)
-{
- /*
- * The REVISION register may not be reliable. Platforms are allowed to
- * override it.
- */
- if (!denali->revision)
- denali->revision = swab16(ioread32(denali->reg + REVISION));
-
- /*
- * Set how many bytes should be skipped before writing data in OOB.
- * If a non-zero value has already been set (by firmware or something),
- * just use it. Otherwise, set the driver default.
- */
- denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- if (!denali->oob_skip_bytes) {
- denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
- iowrite32(denali->oob_skip_bytes,
- denali->reg + SPARE_AREA_SKIP_BYTES);
- }
-
- denali_detect_max_banks(denali);
- iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
-
- iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-}
-
int denali_calc_ecc_bytes(int step_size, int strength)
{
/* BCH code. Denali requires ecc.bytes to be multiple of 2 */
@@ -1060,10 +900,10 @@ EXPORT_SYMBOL(denali_calc_ecc_bytes);
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
- if (section)
+ if (section > 0)
return -ERANGE;
oobregion->offset = denali->oob_skip_bytes;
@@ -1075,10 +915,10 @@ static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
static int denali_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
- if (section)
+ if (section > 0)
return -ERANGE;
oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
@@ -1092,10 +932,13 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-static int denali_multidev_fixup(struct denali_nand_info *denali)
+static int denali_multidev_fixup(struct nand_chip *chip)
{
- struct nand_chip *chip = &denali->nand;
+ struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
/*
* Support for multi device:
@@ -1125,11 +968,12 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
}
/* 2 chips in parallel */
+ memorg->pagesize <<= 1;
+ memorg->oobsize <<= 1;
mtd->size <<= 1;
mtd->erasesize <<= 1;
mtd->writesize <<= 1;
mtd->oobsize <<= 1;
- chip->chipsize <<= 1;
chip->page_shift += 1;
chip->phys_erase_shift += 1;
chip->bbt_erase_shift += 1;
@@ -1145,38 +989,10 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
static int denali_attach_chip(struct nand_chip *chip)
{
+ struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
int ret;
- if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
- denali->dma_avail = 1;
-
- if (denali->dma_avail) {
- int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
-
- ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
- if (ret) {
- dev_info(denali->dev,
- "Failed to set DMA mask. Disabling DMA.\n");
- denali->dma_avail = 0;
- }
- }
-
- if (denali->dma_avail) {
- chip->options |= NAND_USE_BOUNCE_BUFFER;
- chip->buf_align = 16;
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali->setup_dma = denali_setup_dma64;
- else
- denali->setup_dma = denali_setup_dma32;
- }
-
- chip->bbt_options |= NAND_BBT_USE_FLASH;
- chip->bbt_options |= NAND_BBT_NO_OOB;
- chip->ecc.mode = NAND_ECC_HW_SYNDROME;
- chip->options |= NAND_NO_SUBPAGE_WRITE;
-
ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
mtd->oobsize - denali->oob_skip_bytes);
if (ret) {
@@ -1188,123 +1004,230 @@ static int denali_attach_chip(struct nand_chip *chip)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
- FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
- denali->reg + ECC_CORRECTION);
- iowrite32(mtd->erasesize / mtd->writesize,
- denali->reg + PAGES_PER_BLOCK);
- iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
- denali->reg + DEVICE_WIDTH);
- iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
- denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
- iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+ ret = denali_multidev_fixup(chip);
+ if (ret)
+ return ret;
- iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
- iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
- /* chip->ecc.steps is set by nand_scan_tail(); not available here */
- iowrite32(mtd->writesize / chip->ecc.size,
- denali->reg + CFG_NUM_DATA_BLOCKS);
+ return 0;
+}
- mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+static void denali_exec_in8(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ int i;
- if (chip->options & NAND_BUSWIDTH_16) {
- chip->legacy.read_buf = denali_read_buf16;
- chip->legacy.write_buf = denali_write_buf16;
- } else {
- chip->legacy.read_buf = denali_read_buf;
- chip->legacy.write_buf = denali_write_buf;
+ for (i = 0; i < len; i++)
+ buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
+}
+
+static void denali_exec_in16(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i += 2) {
+ data = denali->host_read(denali, type | DENALI_BANK(denali));
+ /* bit 31:24 and 15:8 are used for DDR */
+ buf[i] = data;
+ buf[i + 1] = data >> 16;
}
- chip->ecc.read_page = denali_read_page;
- chip->ecc.read_page_raw = denali_read_page_raw;
- chip->ecc.write_page = denali_write_page;
- chip->ecc.write_page_raw = denali_write_page_raw;
- chip->ecc.read_oob = denali_read_oob;
- chip->ecc.write_oob = denali_write_oob;
+}
- ret = denali_multidev_fixup(denali);
- if (ret)
- return ret;
+static void denali_exec_in(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_in16(denali, type, buf, len);
+ else
+ denali_exec_in8(denali, type, buf, len);
+}
- /*
- * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
- * use devm_kmalloc() because the memory allocated by devm_ does not
- * guarantee DMA-safe alignment.
- */
- denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
- if (!denali->buf)
- return -ENOMEM;
+static void denali_exec_out8(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
+{
+ int i;
- return 0;
+ for (i = 0; i < len; i++)
+ denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
}
-static void denali_detach_chip(struct nand_chip *chip)
+static void denali_exec_out16(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ denali->host_write(denali, type | DENALI_BANK(denali),
+ buf[i + 1] << 16 | buf[i]);
+}
- kfree(denali->buf);
+static void denali_exec_out(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_out16(denali, type, buf, len);
+ else
+ denali_exec_out8(denali, type, buf, len);
+}
+
+static int denali_exec_waitrdy(struct denali_controller *denali)
+{
+ u32 irq_stat;
+
+ /* R/B# pin transitioned from low to high? */
+ irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
+
+ /* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
+ denali_reset_irq(denali);
+
+ return irq_stat & INTR__INT_ACT ? 0 : -EIO;
+}
+
+static int denali_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_CMD,
+ &instr->ctx.cmd.opcode, 1);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_ADDR,
+ instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ denali_exec_in(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ denali_exec_out(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return denali_exec_waitrdy(denali);
+ default:
+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
+ instr->type);
+
+ return -EINVAL;
+ }
+}
+
+static int denali_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ int i, ret;
+
+ if (check_only)
+ return 0;
+
+ denali_select_target(chip, op->cs);
+
+ /*
+ * Some commands contain NAND_OP_WAITRDY_INSTR.
+ * irq must be cleared here to catch the R/B# interrupt there.
+ */
+ denali_reset_irq(to_denali_controller(chip));
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = denali_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static const struct nand_controller_ops denali_controller_ops = {
.attach_chip = denali_attach_chip,
- .detach_chip = denali_detach_chip,
+ .exec_op = denali_exec_op,
.setup_data_interface = denali_setup_data_interface,
};
-int denali_init(struct denali_nand_info *denali)
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip)
{
- struct nand_chip *chip = &denali->nand;
+ struct nand_chip *chip = &dchip->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 features = ioread32(denali->reg + FEATURES);
- int ret;
+ struct denali_chip *dchip2;
+ int i, j, ret;
- mtd->dev.parent = denali->dev;
- denali_hw_init(denali);
+ chip->controller = &denali->controller;
- init_completion(&denali->complete);
- spin_lock_init(&denali->irq_lock);
+ /* sanity checks for bank numbers */
+ for (i = 0; i < dchip->nsels; i++) {
+ unsigned int bank = dchip->sels[i].bank;
- denali_clear_irq_all(denali);
+ if (bank >= denali->nbanks) {
+ dev_err(denali->dev, "unsupported bank %d\n", bank);
+ return -EINVAL;
+ }
- ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
- IRQF_SHARED, DENALI_NAND_NAME, denali);
- if (ret) {
- dev_err(denali->dev, "Unable to request IRQ\n");
- return ret;
- }
+ for (j = 0; j < i; j++) {
+ if (bank == dchip->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is assigned twice in the same chip\n",
+ bank);
+ return -EINVAL;
+ }
+ }
- denali_enable_irq(denali);
+ list_for_each_entry(dchip2, &denali->chips, node) {
+ for (j = 0; j < dchip2->nsels; j++) {
+ if (bank == dchip2->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is already used\n",
+ bank);
+ return -EINVAL;
+ }
+ }
+ }
+ }
- denali->active_bank = DENALI_INVALID_BANK;
+ mtd->dev.parent = denali->dev;
- nand_set_flash_node(chip, denali->dev->of_node);
- /* Fallback to the default name if DT did not give "label" property */
- if (!mtd->name)
+ /*
+	 * Fall back to the default name if DT did not give a "label" property.
+	 * Use the "label" property if multiple chips are connected.
+ */
+ if (!mtd->name && list_empty(&denali->chips))
mtd->name = "denali-nand";
- chip->legacy.select_chip = denali_select_chip;
- chip->legacy.read_byte = denali_read_byte;
- chip->legacy.write_byte = denali_write_byte;
- chip->legacy.cmd_ctrl = denali_cmd_ctrl;
- chip->legacy.waitfunc = denali_waitfunc;
-
- if (features & FEATURES__INDEX_ADDR) {
- denali->host_read = denali_indexed_read;
- denali->host_write = denali_indexed_write;
- } else {
- denali->host_read = denali_direct_read;
- denali->host_write = denali_direct_write;
+ if (denali->dma_avail) {
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->buf_align = 16;
}
/* clk rate info is needed for setup_data_interface */
if (!denali->clk_rate || !denali->clk_x_rate)
chip->options |= NAND_KEEP_TIMINGS;
- chip->legacy.dummy_controller.ops = &denali_controller_ops;
- ret = nand_scan(chip, denali->max_banks);
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.read_page = denali_read_page;
+ chip->ecc.write_page = denali_write_page;
+ chip->ecc.read_page_raw = denali_read_page_raw;
+ chip->ecc.write_page_raw = denali_write_page_raw;
+ chip->ecc.read_oob = denali_read_oob;
+ chip->ecc.write_oob = denali_write_oob;
+
+ mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+
+ ret = nand_scan(chip, dchip->nsels);
if (ret)
- goto disable_irq;
+ return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
@@ -1312,20 +1235,111 @@ int denali_init(struct denali_nand_info *denali)
goto cleanup_nand;
}
+ list_add_tail(&dchip->node, &denali->chips);
+
return 0;
cleanup_nand:
nand_cleanup(chip);
-disable_irq:
- denali_disable_irq(denali);
return ret;
}
+EXPORT_SYMBOL_GPL(denali_chip_init);
+
+int denali_init(struct denali_controller *denali)
+{
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ nand_controller_init(&denali->controller);
+ denali->controller.ops = &denali_controller_ops;
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
+ INIT_LIST_HEAD(&denali->chips);
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ /*
+ * The REVISION register may not be reliable. Platforms are allowed to
+ * override it.
+ */
+ if (!denali->revision)
+ denali->revision = swab16(ioread32(denali->reg + REVISION));
+
+ denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
+
+ /* the encoding changed from rev 5.0 to 5.1 */
+ if (denali->revision < 0x0501)
+ denali->nbanks <<= 1;
+
+ if (features & FEATURES__DMA)
+ denali->dma_avail = true;
+
+ if (denali->dma_avail) {
+ int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
+
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
+ if (ret) {
+ dev_info(denali->dev,
+ "Failed to set DMA mask. Disabling DMA.\n");
+ denali->dma_avail = false;
+ }
+ }
+
+ if (denali->dma_avail) {
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
+ }
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /*
+ * Set how many bytes should be skipped before writing data in OOB.
+ * If a non-zero value has already been set (by firmware or something),
+ * just use it. Otherwise, set the driver's default.
+ */
+ denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
+ if (!denali->oob_skip_bytes) {
+ denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+ iowrite32(denali->oob_skip_bytes,
+ denali->reg + SPARE_AREA_SKIP_BYTES);
+ }
+
+ iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
+ iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
+ iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+
+ denali_clear_irq_all(denali);
+
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
+ }
+
+ denali_enable_irq(denali);
+
+ return 0;
+}
EXPORT_SYMBOL(denali_init);
-void denali_remove(struct denali_nand_info *denali)
+void denali_remove(struct denali_controller *denali)
{
- nand_release(&denali->nand);
+ struct denali_chip *dchip;
+
+ list_for_each_entry(dchip, &denali->chips, node)
+ nand_release(&dchip->chip);
+
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
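With this rework, denali_init() only brings up the controller, and each chip is attached separately through denali_chip_init(), which performs the bank sanity checks and the nand_scan()/mtd_device_register() sequence shown above. A minimal sketch of how a glue driver could register a single chip wired to bank 0 follows; the helper name and the devm_kzalloc()/struct_size() allocation are illustrative assumptions, not code from the real denali_dt/denali_pci glue drivers.

/*
 * Illustrative sketch: attach one NAND chip on bank 0 to an already
 * initialized Denali controller. Not taken from the in-tree glue drivers.
 */
#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static int example_denali_add_chip(struct denali_controller *denali,
				   struct device *dev)
{
	struct denali_chip *dchip;
	unsigned int nsels = 1;		/* one CS line on this chip */

	/* struct denali_chip ends in a per-CS sels[] array, one entry per CS */
	dchip = devm_kzalloc(dev, struct_size(dchip, sels, nsels), GFP_KERNEL);
	if (!dchip)
		return -ENOMEM;

	dchip->nsels = nsels;
	dchip->sels[0].bank = 0;	/* CS 0 is wired to controller bank 0 */

	return denali_chip_init(denali, dchip);
}

Keeping the attached chips on denali->chips is what lets denali_remove() above tear them all down with a simple list_for_each_entry() walk.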
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index c8c2620fc736..e5cdcda56d14 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <linux/completion.h>
+#include <linux/list.h>
#include <linux/mtd/rawnand.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
@@ -290,38 +291,108 @@
#define CHNL_ACTIVE__CHANNEL2 BIT(2)
#define CHNL_ACTIVE__CHANNEL3 BIT(3)
-struct denali_nand_info {
- struct nand_chip nand;
- unsigned long clk_rate; /* core clock rate */
- unsigned long clk_x_rate; /* bus interface clock rate */
- int active_bank; /* currently selected bank */
+/**
+ * struct denali_chip_sel - per-CS data of Denali NAND
+ *
+ * @bank: bank id of the controller this CS is connected to
+ * @hwhr2_and_we_2_re: value of timing register HWHR2_AND_WE_2_RE
+ * @tcwaw_and_addr_2_data: value of timing register TCWAW_AND_ADDR_2_DATA
+ * @re_2_we: value of timing register RE_2_WE
+ * @acc_clks: value of timing register ACC_CLKS
+ * @rdwr_en_lo_cnt: value of timing register RDWR_EN_LO_CNT
+ * @rdwr_en_hi_cnt: value of timing register RDWR_EN_HI_CNT
+ * @cs_setup_cnt: value of timing register CS_SETUP_CNT
+ * @re_2_re: value of timing register RE_2_RE
+ */
+struct denali_chip_sel {
+ int bank;
+ u32 hwhr2_and_we_2_re;
+ u32 tcwaw_and_addr_2_data;
+ u32 re_2_we;
+ u32 acc_clks;
+ u32 rdwr_en_lo_cnt;
+ u32 rdwr_en_hi_cnt;
+ u32 cs_setup_cnt;
+ u32 re_2_re;
+};
+
+/**
+ * struct denali_chip - per-chip data of Denali NAND
+ *
+ * @chip: base NAND chip structure
+ * @node: node to be used to associate this chip with the controller
+ * @nsels: the number of CS lines of this chip
+ * @sels: the array of per-cs data
+ */
+struct denali_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ unsigned int nsels;
+ struct denali_chip_sel sels[0];
+};
+
+/**
+ * struct denali_controller - Denali NAND controller data
+ *
+ * @controller: base NAND controller structure
+ * @dev: device
+ * @chips: the list of chips attached to this controller
+ * @clk_rate: frequency of core clock
+ * @clk_x_rate: frequency of bus interface clock
+ * @reg: base of Register Interface
+ * @host: base of Host Data/Command interface
+ * @complete: completion used to wait for interrupts
+ * @irq: interrupt number
+ * @irq_mask: interrupt bits the controller is waiting for
+ * @irq_status: interrupt bits of events that have happened
+ * @irq_lock: lock to protect @irq_mask and @irq_status
+ * @dma_avail: set if DMA engine is available
+ * @devs_per_cs: number of devices connected in parallel
+ * @oob_skip_bytes: number of bytes in OOB skipped by the ECC engine
+ * @active_bank: active bank id
+ * @nbanks: the number of banks supported by this controller
+ * @revision: IP revision
+ * @caps: controller capabilities that cannot be detected at run-time
+ * @ecc_caps: ECC engine capabilities
+ * @host_read: callback for read access of Host Data/Command Interface
+ * @host_write: callback for write access of Host Data/Command Interface
+ * @setup_dma: callback for setup of the Data DMA
+ */
+struct denali_controller {
+ struct nand_controller controller;
struct device *dev;
- void __iomem *reg; /* Register Interface */
- void __iomem *host; /* Host Data/Command Interface */
+ struct list_head chips;
+ unsigned long clk_rate;
+ unsigned long clk_x_rate;
+ void __iomem *reg;
+ void __iomem *host;
struct completion complete;
- spinlock_t irq_lock; /* protect irq_mask and irq_status */
- u32 irq_mask; /* interrupts we are waiting for */
- u32 irq_status; /* interrupts that have happened */
int irq;
- void *buf; /* for syndrome layout conversion */
- int dma_avail; /* can support DMA? */
- int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes; /* number of bytes reserved for BBM */
- int max_banks;
- unsigned int revision; /* IP revision */
- unsigned int caps; /* IP capability (or quirk) */
+ u32 irq_mask;
+ u32 irq_status;
+ spinlock_t irq_lock;
+ bool dma_avail;
+ int devs_per_cs;
+ int oob_skip_bytes;
+ int active_bank;
+ int nbanks;
+ unsigned int revision;
+ unsigned int caps;
const struct nand_ecc_caps *ecc_caps;
- u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
- void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
- void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
- int page, int write);
+ u32 (*host_read)(struct denali_controller *denali, u32 addr);
+ void (*host_write)(struct denali_controller *denali, u32 addr,
+ u32 data);
+ void (*setup_dma)(struct denali_controller *denali, dma_addr_t dma_addr,
+ int page, bool write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-int denali_init(struct denali_nand_info *denali);
-void denali_remove(struct denali_nand_info *denali);
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip);
+int denali_init(struct denali_controller *denali);
+void denali_remove(struct denali_controller *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 0b5ae2418815..5e14836f6bd5 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -18,7 +18,7 @@
#include "denali.h"
struct denali_dt {
- struct denali_nand_info denali;
+ struct denali_controller controller;
struct clk *clk; /* core clock */
struct clk *clk_x; /* bus interface clock */
struct clk *clk_ecc; /* ECC circuit clock */
@@ -71,19 +71,92 @@ static const struct of_device_id denali_nand_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+static int denali_dt_chip_init(struct denali_controller *denali,
+ struct device_node *chip_np)
+{
+ struct denali_chip *dchip;
+ u32 bank;
+ int nsels, i, ret;
+
+ nsels = of_property_count_u32_elems(chip_np, "reg");
+ if (nsels < 0)
+ return nsels;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip)
+ return -ENOMEM;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(chip_np, "reg", i, &bank);
+ if (ret)
+ return ret;
+
+ dchip->sels[i].bank = bank;
+
+ nand_set_flash_node(&dchip->chip, chip_np);
+ }
+
+ return denali_chip_init(denali, dchip);
+}
+
+/* Backward compatibility for old platforms */
+static int denali_dt_legacy_chip_init(struct denali_controller *denali)
+{
+ struct denali_chip *dchip;
+ int nsels, i;
+
+ nsels = denali->nbanks;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip)
+ return -ENOMEM;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++)
+ dchip->sels[i].bank = i;
+
+ nand_set_flash_node(&dchip->chip, denali->dev->of_node);
+
+ return denali_chip_init(denali, dchip);
+}
+
+/*
+ * Check the DT binding.
+ * The new binding expects chip subnodes in the controller node.
+ * So, #address-cells = <1>; #size-cells = <0>; are required.
+ * Check the #size-cells to distinguish the binding.
+ */
+static bool denali_dt_is_legacy_binding(struct device_node *np)
+{
+ u32 cells;
+ int ret;
+
+ ret = of_property_read_u32(np, "#size-cells", &cells);
+ if (ret)
+ return true;
+
+ return cells != 0;
+}
+
static int denali_dt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct denali_dt *dt;
const struct denali_dt_data *data;
- struct denali_nand_info *denali;
+ struct denali_controller *denali;
+ struct device_node *np;
int ret;
dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
- denali = &dt->denali;
+ denali = &dt->controller;
data = of_device_get_match_data(dev);
if (data) {
@@ -140,9 +213,26 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_ecc;
+ if (denali_dt_is_legacy_binding(dev->of_node)) {
+ ret = denali_dt_legacy_chip_init(denali);
+ if (ret)
+ goto out_remove_denali;
+ } else {
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
+ goto out_remove_denali;
+ }
+ }
+ }
+
platform_set_drvdata(pdev, dt);
+
return 0;
+out_remove_denali:
+ denali_remove(denali);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
@@ -157,7 +247,7 @@ static int denali_dt_remove(struct platform_device *pdev)
{
struct denali_dt *dt = platform_get_drvdata(pdev);
- denali_remove(&dt->denali);
+ denali_remove(&dt->controller);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 48e9ac54ad53..d62aa5271753 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -29,10 +29,11 @@ NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15);
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int ret;
resource_size_t csr_base, mem_base;
unsigned long csr_len, mem_len;
- struct denali_nand_info *denali;
+ struct denali_controller *denali;
+ struct denali_chip *dchip;
+ int nsels, ret, i;
denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
if (!denali)
@@ -64,7 +65,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->dev = &dev->dev;
denali->irq = dev->irq;
denali->ecc_caps = &denali_pci_ecc_caps;
- denali->nand.ecc.options |= NAND_ECC_MAXIMIZE;
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
@@ -84,27 +84,49 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
ret = -ENOMEM;
- goto failed_remap_reg;
+ goto out_unmap_reg;
}
ret = denali_init(denali);
if (ret)
- goto failed_remap_mem;
+ goto out_unmap_host;
+
+ nsels = denali->nbanks;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip) {
+ ret = -ENOMEM;
+ goto out_remove_denali;
+ }
+
+ dchip->chip.ecc.options |= NAND_ECC_MAXIMIZE;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++)
+ dchip->sels[i].bank = i;
+
+ ret = denali_chip_init(denali, dchip);
+ if (ret)
+ goto out_remove_denali;
pci_set_drvdata(dev, denali);
return 0;
-failed_remap_mem:
+out_remove_denali:
+ denali_remove(denali);
+out_unmap_host:
iounmap(denali->host);
-failed_remap_reg:
+out_unmap_reg:
iounmap(denali->reg);
return ret;
}
static void denali_pci_remove(struct pci_dev *dev)
{
- struct denali_nand_info *denali = pci_get_drvdata(dev);
+ struct denali_controller *denali = pci_get_drvdata(dev);
denali_remove(denali);
iounmap(denali->reg);
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 53f57e0f007e..f430c4bf0323 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1028,6 +1028,7 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
+ struct nand_memory_organization *memorg;
int ret = 0;
u_char *buf;
struct NFTLMediaHeader *mh;
@@ -1036,6 +1037,8 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
unsigned blocks, maxblocks;
int offs, numheaders;
+ memorg = nanddev_get_memorg(&this->base);
+
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
return 0;
@@ -1082,6 +1085,7 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
implementation of the NAND layer. */
if (mh->UnitSizeFactor != 0xff) {
this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ memorg->pages_per_eraseblock <<= (0xff - mh->UnitSizeFactor);
mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
pr_info("Setting virtual erase size to %d\n", mtd->erasesize);
blocks = mtd->size >> this->bbt_erase_shift;
@@ -1287,7 +1291,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
struct doc_priv *doc = nand_get_controller_data(this);
struct mtd_partition parts[5];
- if (this->numchips > doc->chips_per_floor) {
+ if (nanddev_ntargets(&this->base) > doc->chips_per_floor) {
pr_err("Multi-floor INFTL devices not yet supported.\n");
return -EIO;
}
@@ -1477,6 +1481,7 @@ static int __init doc_probe(unsigned long physadr)
break;
case DOC_ChipID_DocMilPlus32:
pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ /* fall through */
default:
ret = -ENODEV;
goto notfound;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 70f0d2b450ea..423828ff68e6 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -355,6 +355,15 @@ static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
fsl_elbc_run_command(mtd);
return;
+ /* RNDOUT moves the pointer inside the page */
+ case NAND_CMD_RNDOUT:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_RNDOUT, column: 0x%x.\n",
+ column);
+
+ elbc_fcm_ctrl->index = column;
+ return;
+
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
dev_vdbg(priv->dev,
@@ -635,79 +644,6 @@ static int fsl_elbc_wait(struct nand_chip *chip)
return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
-static int fsl_elbc_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
- struct fsl_lbc_ctrl *ctrl = priv->ctrl;
- struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
- unsigned int al;
-
- /* calculate FMR Address Length field */
- al = 0;
- if (chip->pagemask & 0xffff0000)
- al++;
- if (chip->pagemask & 0xff000000)
- al++;
-
- priv->fmr |= al << FMR_AL_SHIFT;
-
- dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
- chip->numchips);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
- chip->chipsize);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
- chip->pagemask);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
- chip->legacy.chip_delay);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
- chip->badblockpos);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
- chip->chip_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
- chip->page_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
- chip->phys_erase_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
- chip->ecc.mode);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
- chip->ecc.steps);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
- chip->ecc.bytes);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
- chip->ecc.total);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
- mtd->ooblayout);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
- mtd->erasesize);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
- mtd->writesize);
- dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
- mtd->oobsize);
-
- /* adjust Option Register and ECC to match Flash page size */
- if (mtd->writesize == 512) {
- priv->page_size = 0;
- clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
- } else if (mtd->writesize == 2048) {
- priv->page_size = 1;
- setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
- } else {
- dev_err(priv->dev,
- "fsl_elbc_init: page size %d is not supported\n",
- mtd->writesize);
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static const struct nand_controller_ops fsl_elbc_controller_ops = {
- .attach_chip = fsl_elbc_attach_chip,
-};
-
static int fsl_elbc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
@@ -794,27 +730,116 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->controller = &elbc_fcm_ctrl->controller;
nand_set_controller_data(chip, priv);
- chip->ecc.read_page = fsl_elbc_read_page;
- chip->ecc.write_page = fsl_elbc_write_page;
- chip->ecc.write_subpage = fsl_elbc_write_subpage;
-
- /* If CS Base Register selects full hardware ECC then use it */
- if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
- BR_DECC_CHK_GEN) {
- chip->ecc.mode = NAND_ECC_HW;
- mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
+ return 0;
+}
+
+static int fsl_elbc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+
+ switch (chip->ecc.mode) {
+ /*
+ * if ECC was not chosen in DT, decide whether to use HW or SW ECC from
+ * CS Base Register
+ */
+ case NAND_ECC_NONE:
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ }
+ break;
+
+ /* if SW ECC was chosen in DT, we do not need to set anything here */
+ case NAND_ECC_SOFT:
+ break;
+
+ /* should we also handle NAND_ECC_HW the same way as the code above? */
+ default:
+ return -EINVAL;
+ }
+
+ /* calculate FMR Address Length field */
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
+
+ priv->fmr |= al << FMR_AL_SHIFT;
+
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ nanddev_ntargets(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ nanddev_target_size(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
+ chip->legacy.chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+ mtd->ooblayout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
} else {
- /* otherwise fall back to default software ECC */
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ dev_err(priv->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -ENOTSUPP;
}
return 0;
}
+static const struct nand_controller_ops fsl_elbc_controller_ops = {
+ .attach_chip = fsl_elbc_attach_chip,
+};
+
static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index e65d274399f9..04a3dcd675bf 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -722,9 +722,9 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip)
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
- chip->numchips);
+ nanddev_ntargets(&chip->base));
dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
- chip->chipsize);
+ nanddev_target_size(&chip->base));
dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
chip->pagemask);
dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index a4768df5083f..a8b26d2e793c 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -157,8 +157,7 @@ int gpmi_init(struct gpmi_nand_data *this)
* Reset BCH here, too. We got failures otherwise :(
* See later BCH reset for explanation of MX23 and MX28 handling
*/
- ret = gpmi_reset_block(r->bch_regs,
- GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;
@@ -266,8 +265,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23
* and MX28.
*/
- ret = gpmi_reset_block(r->bch_regs,
- GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index ed405c9434fe..40df20d1adf5 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -171,7 +171,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
struct bch_geometry *geo = &this->bch_geometry;
/* Do the sanity check. */
- if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ if (GPMI_IS_MXS(this)) {
/* The mx23/mx28 only support the GF13. */
if (geo->gf_len == 14)
return false;
@@ -204,7 +204,8 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
default:
dev_err(this->dev,
"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
- chip->ecc_strength_ds, chip->ecc_step_ds);
+ chip->base.eccreq.strength,
+ chip->base.eccreq.step_size);
return -EINVAL;
}
geo->ecc_chunk_size = ecc_step;
@@ -417,11 +418,13 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
|| legacy_set_geometry(this)) {
- if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ if (!(chip->base.eccreq.strength > 0 &&
+ chip->base.eccreq.step_size > 0))
return -EINVAL;
- return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
- chip->ecc_step_ds);
+ return set_geometry_by_ecc_info(this,
+ chip->base.eccreq.strength,
+ chip->base.eccreq.step_size);
}
return 0;
@@ -1602,7 +1605,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->data_buf;
+ u8 *buffer = nand_get_data_buf(chip);
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1664,7 +1667,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->data_buf;
+ u8 *buffer = nand_get_data_buf(chip);
int saved_chip_number;
int status;
@@ -1753,7 +1756,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
dev_dbg(dev, "Transcribing bad block marks...\n");
/* Compute the number of blocks in the entire medium. */
- block_count = chip->chipsize >> chip->phys_erase_shift;
+ block_count = nanddev_eraseblocks_per_target(&chip->base);
/*
* Loop over all the blocks in the medium, transcribing block marks as
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index d0b79bac2728..a804a4a5bd46 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -207,4 +207,5 @@ void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
GPMI_IS_MX7D(x))
+#define GPMI_IS_MXS(x) (GPMI_IS_MX23(x) || GPMI_IS_MX28(x))
#endif
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index f3f9aa160cff..e4526fff9da4 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -849,7 +849,7 @@ static int hisi_nfc_resume(struct device *dev)
struct hinfc_host *host = dev_get_drvdata(dev);
struct nand_chip *chip = &host->chip;
- for (cs = 0; cs < chip->numchips; cs++)
+ for (cs = 0; cs < nanddev_ntargets(&chip->base); cs++)
hisi_nfc_send_cmd_reset(host, cs);
hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
new file mode 100644
index 000000000000..7cfc77021154
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -0,0 +1,50 @@
+config MTD_NAND_JZ4740
+ tristate "JZ4740 NAND controller"
+ depends on MACH_JZ4740 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash on JZ4740 SoC based boards.
+
+config MTD_NAND_JZ4780
+ tristate "JZ4780 NAND controller"
+ depends on JZ4780_NEMC
+ help
+ Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
+ based boards, using the BCH controller for hardware error correction.
+
+if MTD_NAND_JZ4780
+
+config MTD_NAND_INGENIC_ECC
+ tristate
+
+config MTD_NAND_JZ4740_ECC
+ tristate "Hardware BCH support for JZ4740 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the Reed-Solomon error-correction
+ hardware present on the JZ4740 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4740-ecc.
+
+config MTD_NAND_JZ4725B_BCH
+ tristate "Hardware BCH support for JZ4725B SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4725B SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4725b-bch.
+
+config MTD_NAND_JZ4780_BCH
+ tristate "Hardware BCH support for JZ4780 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4780 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4780-bch.
+
+endif # MTD_NAND_JZ4780
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
new file mode 100644
index 000000000000..ab2c5f47e5b7
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
+
+obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
+obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
+obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
new file mode 100644
index 000000000000..d3e085c5685a
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ47xx ECC common code
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+/**
+ * ingenic_ecc_calculate() - calculate ECC for a data buffer
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: input buffer with raw data.
+ * @ecc_code: output buffer with ECC.
+ *
+ * Return: 0 on success, -ETIMEDOUT if timed out while waiting for ECC
+ * controller.
+ */
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->calculate(ecc, params, buf, ecc_code);
+}
+EXPORT_SYMBOL(ingenic_ecc_calculate);
+
+/**
+ * ingenic_ecc_correct() - detect and correct bit errors
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: raw data read from the chip.
+ * @ecc_code: ECC read from the chip.
+ *
+ * Given the raw data and the ECC read from the NAND device, detects and
+ * corrects errors in the data.
+ *
+ * Return: the number of bit errors corrected, -EBADMSG if there are too many
+ * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
+ */
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->correct(ecc, params, buf, ecc_code);
+}
+EXPORT_SYMBOL(ingenic_ecc_correct);
+
+/**
+ * ingenic_ecc_get() - get the ECC controller device
+ * @np: ECC device tree node.
+ *
+ * Gets the ECC controller device from the specified device tree node. The
+ * device must be released with ingenic_ecc_release() when it is no longer being
+ * used.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct ingenic_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&pdev->dev);
+
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+
+ return ecc;
+}
+
+/**
+ * of_ingenic_ecc_get() - get the ECC controller from a DT node
+ * @of_node: the node that contains an ecc-engine property.
+ *
+ * Get the ecc-engine property from the given device tree
+ * node and pass it to ingenic_ecc_get to do the work.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
+{
+ struct ingenic_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+
+ /*
+ * If the ecc-engine property is not found, check for the deprecated
+ * ingenic,bch-controller property
+ */
+ if (!np)
+ np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
+
+ if (np) {
+ ecc = ingenic_ecc_get(np);
+ of_node_put(np);
+ }
+ return ecc;
+}
+EXPORT_SYMBOL(of_ingenic_ecc_get);
+
+/**
+ * ingenic_ecc_release() - release the ECC controller device
+ * @ecc: ECC device.
+ */
+void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+EXPORT_SYMBOL(ingenic_ecc_release);
+
+int ingenic_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_ecc *ecc;
+ struct resource *res;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ ecc->ops = device_get_match_data(dev);
+ if (!ecc->ops)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ecc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ecc->base))
+ return PTR_ERR(ecc->base);
+
+ ecc->ops->disable(ecc);
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ mutex_init(&ecc->lock);
+
+ ecc->dev = dev;
+ platform_set_drvdata(pdev, ecc);
+
+ return 0;
+}
+EXPORT_SYMBOL(ingenic_ecc_probe);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic ECC common driver");
+MODULE_LICENSE("GPL v2");
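
For reference, a small hypothetical caller of the common API exported above: the ingenic_ecc handle would be obtained with of_ingenic_ecc_get() at probe time and dropped with ingenic_ecc_release() on remove, and each ECC step read from the chip is passed to ingenic_ecc_correct(). The "my_" name and the parameter values (a 1024-byte step corrected with strength 24, i.e. 42 ECC bytes) are only illustrative.

#include <linux/printk.h>

#include "ingenic_ecc.h"

/* hypothetical helper: correct one ECC step that was just read from NAND */
static int my_correct_step(struct ingenic_ecc *ecc, u8 *data, u8 *ecc_buf)
{
	struct ingenic_ecc_params params = {
		.size = 1024,		/* data bytes per ECC step */
		.bytes = 42,		/* ECC bytes per step */
		.strength = 24,		/* correctable bits per step */
	};
	int ret;

	ret = ingenic_ecc_correct(ecc, &params, data, ecc_buf);
	if (ret == -EBADMSG)
		pr_warn("uncorrectable ECC error in this step\n");

	return ret;	/* < 0 on error, otherwise number of corrected bitflips */
}
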
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
new file mode 100644
index 000000000000..2cda439b5e11
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+#define __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+struct clk;
+struct device;
+struct ingenic_ecc;
+struct platform_device;
+
+/**
+ * struct ingenic_ecc_params - ECC parameters
+ * @size: data bytes per ECC step.
+ * @bytes: ECC bytes per step.
+ * @strength: number of correctable bits per ECC step.
+ */
+struct ingenic_ecc_params {
+ int size;
+ int bytes;
+ int strength;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_INGENIC_ECC)
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code);
+
+void ingenic_ecc_release(struct ingenic_ecc *ecc);
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
+#else /* CONFIG_MTD_NAND_INGENIC_ECC */
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+}
+
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_MTD_NAND_INGENIC_ECC */
+
+struct ingenic_ecc_ops {
+ void (*disable)(struct ingenic_ecc *ecc);
+ int (*calculate)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+ int (*correct)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code);
+};
+
+struct ingenic_ecc {
+ struct device *dev;
+ const struct ingenic_ecc_ops *ops;
+ void __iomem *base;
+ struct clk *clk;
+ struct mutex lock;
+};
+
+int ingenic_ecc_probe(struct platform_device *pdev);
+
+#endif /* __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__ */
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
new file mode 100644
index 000000000000..d7b7c0f13909
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic JZ47xx NAND driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/jz4780-nemc.h>
+
+#include "ingenic_ecc.h"
+
+#define DRV_NAME "ingenic-nand"
+
+/* Command delay when there is no R/B pin. */
+#define RB_DELAY_US 100
+
+struct jz_soc_info {
+ unsigned long data_offset;
+ unsigned long addr_offset;
+ unsigned long cmd_offset;
+ const struct mtd_ooblayout_ops *oob_layout;
+};
+
+struct ingenic_nand_cs {
+ unsigned int bank;
+ void __iomem *base;
+};
+
+struct ingenic_nfc {
+ struct device *dev;
+ struct ingenic_ecc *ecc;
+ const struct jz_soc_info *soc_info;
+ struct nand_controller controller;
+ unsigned int num_banks;
+ struct list_head chips;
+ int selected;
+ struct ingenic_nand_cs cs[];
+};
+
+struct ingenic_nand {
+ struct nand_chip chip;
+ struct list_head chip_list;
+
+ struct gpio_desc *busy_gpio;
+ struct gpio_desc *wp_gpio;
+ unsigned int reading: 1;
+};
+
+static inline struct ingenic_nand *to_ingenic_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct ingenic_nand, chip);
+}
+
+static inline struct ingenic_nfc *to_ingenic_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct ingenic_nfc, controller);
+}
+
+static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 12;
+
+ return 0;
+}
+
+static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 12;
+ oobregion->offset = 12 + ecc->total;
+
+ return 0;
+}
+
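+/*
+ * Worked example: with a 64-byte OOB and ecc->total = 36 (9 ECC bytes per
+ * 512-byte step on a 2k page), the callbacks above place the ECC at OOB
+ * bytes 12..47 and leave bytes 48..63 as the free region.
+ */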
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+ .ecc = qi_lb60_ooblayout_ecc,
+ .free = qi_lb60_ooblayout_free,
+};
+
+static int jz4725b_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 3;
+
+ return 0;
+}
+
+static int jz4725b_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 3;
+ oobregion->offset = 3 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
+ .ecc = jz4725b_ooblayout_ecc,
+ .free = jz4725b_ooblayout_free,
+};
+
+static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+
+ /* Ensure the currently selected chip is deasserted. */
+ if (chipnr == -1 && nfc->selected >= 0) {
+ cs = &nfc->cs[nfc->selected];
+ jz4780_nemc_assert(nfc->dev, cs->bank, false);
+ }
+
+ nfc->selected = chipnr;
+}
+
+static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+
+ if (WARN_ON(nfc->selected < 0))
+ return;
+
+ cs = &nfc->cs[nfc->selected];
+
+ jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_ALE)
+ writeb(cmd, cs->base + nfc->soc_info->addr_offset);
+ else if (ctrl & NAND_CLE)
+ writeb(cmd, cs->base + nfc->soc_info->cmd_offset);
+}
+
+static int ingenic_nand_dev_ready(struct nand_chip *chip)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+
+ return !gpiod_get_value_cansleep(nand->busy_gpio);
+}
+
+static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+
+ nand->reading = (mode == NAND_ECC_READ);
+}
+
+static int ingenic_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
+ u8 *ecc_code)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ /*
+ * We don't need to generate the ECC when reading; the ECC engine does it
+ * for us as part of decoding/correction.
+ */
+ if (nand->reading)
+ return 0;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_calculate(nfc->ecc, &params, dat, ecc_code);
+}
+
+static int ingenic_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_correct(nfc->ecc, &params, dat, read_ecc);
+}
+
+static int ingenic_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ int eccbytes;
+
+ if (chip->ecc.strength == 4) {
+ /* JZ4740 uses 9 bytes of ECC to correct maximum 4 errors */
+ chip->ecc.bytes = 9;
+ } else {
+ chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
+ (chip->ecc.strength / 8);
+ }
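+ /* e.g. a 1024-byte step corrected with strength 24: fls(9 * 1024) = 14 */
+ /* bits per parity symbol, so 24 * 14 / 8 = 42 ECC bytes per step */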
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_HW:
+ if (!nfc->ecc) {
+ dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
+ return -ENODEV;
+ }
+
+ chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
+ chip->ecc.calculate = ingenic_nand_ecc_calculate;
+ chip->ecc.correct = ingenic_nand_ecc_correct;
+ /* fall through */
+ case NAND_ECC_SOFT:
+ dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
+ (nfc->ecc) ? "hardware ECC" : "software ECC",
+ chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+ break;
+ case NAND_ECC_NONE:
+ dev_info(nfc->dev, "not using ECC\n");
+ break;
+ default:
+ dev_err(nfc->dev, "ECC mode %d not supported\n",
+ chip->ecc.mode);
+ return -EINVAL;
+ }
+
+ /* The NAND core will generate the ECC layout for SW ECC */
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ /* Generate ECC layout. ECC codes are right aligned in the OOB area. */
+ eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+
+ if (eccbytes > mtd->oobsize - 2) {
+ dev_err(nfc->dev,
+ "invalid ECC config: required %d ECC bytes, but only %d are available",
+ eccbytes, mtd->oobsize - 2);
+ return -EINVAL;
+ }
+
+ /*
+ * The generic layout for BBT markers will most likely overlap with our
+ * ECC bytes in the OOB, so move the BBT markers outside the OOB area.
+ */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* For legacy reasons we use a different layout on the qi,lb60 board. */
+ if (of_machine_is_compatible("qi,lb60"))
+ mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
+
+ return 0;
+}
+
+static const struct nand_controller_ops ingenic_nand_controller_ops = {
+ .attach_chip = ingenic_nand_attach_chip,
+};
+
+static int ingenic_nand_init_chip(struct platform_device *pdev,
+ struct ingenic_nfc *nfc,
+ struct device_node *np,
+ unsigned int chipnr)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_nand *nand;
+ struct ingenic_nand_cs *cs;
+ struct resource *res;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ const __be32 *reg;
+ int ret = 0;
+
+ cs = &nfc->cs[chipnr];
+
+ reg = of_get_property(np, "reg", NULL);
+ if (!reg)
+ return -EINVAL;
+
+ cs->bank = be32_to_cpu(*reg);
+
+ jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr);
+ cs->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cs->base))
+ return PTR_ERR(cs->base);
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
+
+ if (IS_ERR(nand->busy_gpio)) {
+ ret = PTR_ERR(nand->busy_gpio);
+ dev_err(dev, "failed to request busy GPIO: %d\n", ret);
+ return ret;
+ } else if (nand->busy_gpio) {
+ nand->chip.legacy.dev_ready = ingenic_nand_dev_ready;
+ }
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
+ cs->bank);
+ if (!mtd->name)
+ return -ENOMEM;
+ mtd->dev.parent = dev;
+
+ chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset;
+ chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset;
+ chip->legacy.chip_delay = RB_DELAY_US;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+ chip->legacy.select_chip = ingenic_nand_select_chip;
+ chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->controller->ops = &ingenic_nand_controller_ops;
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_release(chip);
+ return ret;
+ }
+
+ list_add_tail(&nand->chip_list, &nfc->chips);
+
+ return 0;
+}
+
+static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
+{
+ struct ingenic_nand *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips,
+ struct ingenic_nand, chip_list);
+ nand_release(&chip->chip);
+ list_del(&chip->chip_list);
+ }
+}
+
+static int ingenic_nand_init_chips(struct ingenic_nfc *nfc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ int i = 0;
+ int ret;
+ int num_chips = of_get_child_count(dev->of_node);
+
+ if (num_chips > nfc->num_banks) {
+ dev_err(dev, "found %d chips but only %d banks\n",
+ num_chips, nfc->num_banks);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = ingenic_nand_init_chip(pdev, nfc, np, i);
+ if (ret) {
+ ingenic_nand_cleanup_chips(nfc);
+ return ret;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int ingenic_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int num_banks;
+ struct ingenic_nfc *nfc;
+ int ret;
+
+ num_banks = jz4780_nemc_num_banks(dev);
+ if (num_banks == 0) {
+ dev_err(dev, "no banks found\n");
+ return -ENODEV;
+ }
+
+ nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->soc_info = device_get_match_data(dev);
+ if (!nfc->soc_info)
+ return -EINVAL;
+
+ /*
+ * Check for ECC HW before we call nand_scan_ident, to prevent us from
+ * having to call it again if the ECC driver returns -EPROBE_DEFER.
+ */
+ nfc->ecc = of_ingenic_ecc_get(dev->of_node);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+
+ nfc->dev = dev;
+ nfc->num_banks = num_banks;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ ret = ingenic_nand_init_chips(nfc, pdev);
+ if (ret) {
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+ return 0;
+}
+
+static int ingenic_nand_remove(struct platform_device *pdev)
+{
+ struct ingenic_nfc *nfc = platform_get_drvdata(pdev);
+
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+
+ ingenic_nand_cleanup_chips(nfc);
+
+ return 0;
+}
+
+static const struct jz_soc_info jz4740_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_layout = &nand_ooblayout_lp_ops,
+};
+
+static const struct jz_soc_info jz4725b_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_layout = &jz4725b_ooblayout_ops,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00400000,
+ .addr_offset = 0x00800000,
+ .oob_layout = &nand_ooblayout_lp_ops,
+};
+
+static const struct of_device_id ingenic_nand_dt_match[] = {
+ { .compatible = "ingenic,jz4740-nand", .data = &jz4740_soc_info },
+ { .compatible = "ingenic,jz4725b-nand", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4780-nand", .data = &jz4780_soc_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match);
+
+static struct platform_driver ingenic_nand_driver = {
+ .probe = ingenic_nand_probe,
+ .remove = ingenic_nand_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ingenic_nand_dt_match),
+ },
+};
+module_platform_driver(ingenic_nand_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ47xx NAND driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
new file mode 100644
index 000000000000..6c852eae09cf
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4725B BCH controller driver
+ *
+ * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * Based on jz4780_bch.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define BCH_BHCR 0x0
+#define BCH_BHCSR 0x4
+#define BCH_BHCCR 0x8
+#define BCH_BHCNT 0xc
+#define BCH_BHDR 0x10
+#define BCH_BHPAR0 0x14
+#define BCH_BHERR0 0x28
+#define BCH_BHINT 0x24
+#define BCH_BHINTES 0x3c
+#define BCH_BHINTEC 0x40
+#define BCH_BHINTE 0x38
+
+#define BCH_BHCR_ENCE BIT(3)
+#define BCH_BHCR_BSEL BIT(2)
+#define BCH_BHCR_INIT BIT(1)
+#define BCH_BHCR_BCHE BIT(0)
+
+#define BCH_BHCNT_DEC_COUNT_SHIFT 16
+#define BCH_BHCNT_DEC_COUNT_MASK (0x3ff << BCH_BHCNT_DEC_COUNT_SHIFT)
+#define BCH_BHCNT_ENC_COUNT_SHIFT 0
+#define BCH_BHCNT_ENC_COUNT_MASK (0x3ff << BCH_BHCNT_ENC_COUNT_SHIFT)
+
+#define BCH_BHERR_INDEX0_SHIFT 0
+#define BCH_BHERR_INDEX0_MASK (0x1fff << BCH_BHERR_INDEX0_SHIFT)
+#define BCH_BHERR_INDEX1_SHIFT 16
+#define BCH_BHERR_INDEX1_MASK (0x1fff << BCH_BHERR_INDEX1_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT 28
+#define BCH_BHINT_ERRC_MASK (0xf << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT 16
+#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_ALL_0 BIT(5)
+#define BCH_BHINT_ALL_F BIT(4)
+#define BCH_BHINT_DECF BIT(3)
+#define BCH_BHINT_ENCF BIT(2)
+#define BCH_BHINT_UNCOR BIT(1)
+#define BCH_BHINT_ERR BIT(0)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US 100000
+
+static inline void jz4725b_bch_config_set(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCSR);
+}
+
+static inline void jz4725b_bch_config_clear(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCCR);
+}
+
+static int jz4725b_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool calc_ecc)
+{
+ u32 reg, max_value;
+
+ /* Clear interrupt status. */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Initialise and enable BCH. */
+ jz4725b_bch_config_clear(bch, 0x1f);
+ jz4725b_bch_config_set(bch, BCH_BHCR_BCHE);
+
+ if (params->strength == 8)
+ jz4725b_bch_config_set(bch, BCH_BHCR_BSEL);
+ else
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BSEL);
+
+ if (calc_ecc) /* calculate ECC from data */
+ jz4725b_bch_config_set(bch, BCH_BHCR_ENCE);
+ else /* correct data from ECC */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_ENCE);
+
+ jz4725b_bch_config_set(bch, BCH_BHCR_INIT);
+
+ max_value = BCH_BHCNT_ENC_COUNT_MASK >> BCH_BHCNT_ENC_COUNT_SHIFT;
+ if (params->size > max_value)
+ return -EINVAL;
+
+ max_value = BCH_BHCNT_DEC_COUNT_MASK >> BCH_BHCNT_DEC_COUNT_SHIFT;
+ if (params->size + params->bytes > max_value)
+ return -EINVAL;
+
+ /* Set up BCH count register. */
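+ /* (e.g. a 512-byte step with 13 ECC bytes: ENC count 512, DEC count 525) */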
+ reg = params->size << BCH_BHCNT_ENC_COUNT_SHIFT;
+ reg |= (params->size + params->bytes) << BCH_BHCNT_DEC_COUNT_SHIFT;
+ writel(reg, bch->base + BCH_BHCNT);
+
+ return 0;
+}
+
+static void jz4725b_bch_disable(struct ingenic_ecc *bch)
+{
+ /* Clear interrupts */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Disable the hardware */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BCHE);
+}
+
+static void jz4725b_bch_write_data(struct ingenic_ecc *bch, const u8 *buf,
+ size_t size)
+{
+ while (size--)
+ writeb(*buf++, bch->base + BCH_BHDR);
+}
+
+static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ u32 *dest32;
+ u8 *dest8;
+ u32 val, offset = 0;
+
+ dest32 = (u32 *)buf;
+ while (size32--) {
+ *dest32++ = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ offset += sizeof(u32);
+ }
+
+ dest8 = (u8 *)dest32;
+ val = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ switch (size8) {
+ case 3:
+ dest8[2] = (val >> 16) & 0xff;
+ /* fall-through */
+ case 2:
+ dest8[1] = (val >> 8) & 0xff;
+ /* fall-through */
+ case 1:
+ dest8[0] = val & 0xff;
+ break;
+ }
+}
+
+static int jz4725b_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
+ u32 *status)
+{
+ u32 reg;
+ int ret;
+
+ /*
+ * While we could use interrupts here and sleep until the operation
+ * completes, the controller works fairly quickly (usually a few
+ * microseconds) and so the overhead of sleeping until we get an
+ * interrupt quite noticeably decreases performance.
+ */
+ ret = readl_relaxed_poll_timeout(bch->base + BCH_BHINT, reg,
+ reg & irq, 0, BCH_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ if (status)
+ *status = reg;
+
+ writel(reg, bch->base + BCH_BHINT);
+
+ return 0;
+}
+
+static int jz4725b_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, true);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL);
+ if (ret) {
+ dev_err(bch->dev, "timed out while calculating ECC\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_read_parity(bch, ecc_code, params->bytes);
+
+out_disable:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static int jz4725b_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ u32 reg, errors, bit;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, false);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+ jz4725b_bch_write_data(bch, ecc_code, params->bytes);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_DECF, &reg);
+ if (ret) {
+ dev_err(bch->dev, "timed out while correcting data\n");
+ goto out;
+ }
+
+ if (reg & (BCH_BHINT_ALL_F | BCH_BHINT_ALL_0)) {
+ /* Data and ECC are all 0xff or 0x00 - nothing to correct */
+ ret = 0;
+ goto out;
+ }
+
+ if (reg & BCH_BHINT_UNCOR) {
+ /* Uncorrectable ECC error */
+ ret = -EBADMSG;
+ goto out;
+ }
+
+ errors = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+
+ /* Correct any detected errors. */
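+ /* Two 13-bit error indexes are packed per BHERRn register: a new */
+ /* register is read on even i, its INDEX1 field is reused on odd i. */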
+ for (i = 0; i < errors; i++) {
+ if (i & 1) {
+ bit = (reg & BCH_BHERR_INDEX1_MASK) >> BCH_BHERR_INDEX1_SHIFT;
+ } else {
+ reg = readl(bch->base + BCH_BHERR0 + (i * 2));
+ bit = (reg & BCH_BHERR_INDEX0_MASK) >> BCH_BHERR_INDEX0_SHIFT;
+ }
+
+ buf[(bit >> 3)] ^= BIT(bit & 0x7);
+ }
+
+out:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static const struct ingenic_ecc_ops jz4725b_bch_ops = {
+ .disable = jz4725b_bch_disable,
+ .calculate = jz4725b_calculate,
+ .correct = jz4725b_correct,
+};
+
+static const struct of_device_id jz4725b_bch_dt_match[] = {
+ { .compatible = "ingenic,jz4725b-bch", .data = &jz4725b_bch_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4725b_bch_dt_match);
+
+static struct platform_driver jz4725b_bch_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4725b-bch",
+ .of_match_table = jz4725b_bch_dt_match,
+ },
+};
+module_platform_driver(jz4725b_bch_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4725B BCH controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
new file mode 100644
index 000000000000..13fea645c7f0
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4740 ECC controller driver
+ *
+ * Copyright (c) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * based on jz4740-nand.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define JZ_REG_NAND_ECC_CTRL 0x00
+#define JZ_REG_NAND_DATA 0x04
+#define JZ_REG_NAND_PAR0 0x08
+#define JZ_REG_NAND_PAR1 0x0C
+#define JZ_REG_NAND_PAR2 0x10
+#define JZ_REG_NAND_IRQ_STAT 0x14
+#define JZ_REG_NAND_IRQ_CTRL 0x18
+#define JZ_REG_NAND_ERR(x) (0x1C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
+static const uint8_t empty_block_ecc[] = {
+ 0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f
+};
+
+static void jz4740_ecc_reset(struct ingenic_ecc *ecc, bool calc_ecc)
+{
+ uint32_t reg;
+
+ /* Clear interrupt status */
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+
+ /* Initialize and enable ECC hardware */
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+ if (calc_ecc) /* calculate ECC from data */
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ else /* correct data from ECC */
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ uint32_t reg, status;
+ unsigned int timeout = 1000;
+ int i;
+
+ jz4740_ecc_reset(ecc, true);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < params->bytes; ++i)
+ ecc_code[i] = readb(ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ /*
+ * If the written data is completely 0xff, we also want to write 0xff as
+ * ECC, otherwise we will get in trouble when doing subpage writes.
+ */
+ if (memcmp(ecc_code, empty_block_ecc, ARRAY_SIZE(empty_block_ecc)) == 0)
+ memset(ecc_code, 0xff, ARRAY_SIZE(empty_block_ecc));
+
+ return 0;
+}
+
+static void jz_nand_correct_data(uint8_t *buf, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = buf[index];
+ data |= buf[index + 1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ buf[index] = data & 0xff;
+ buf[index + 1] = (data >> 8) & 0xff;
+}
+
+static int jz4740_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ unsigned int timeout = 1000;
+
+ jz4740_ecc_reset(ecc, false);
+
+ for (i = 0; i < params->bytes; ++i)
+ writeb(ecc_code[i], ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -EBADMSG;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(ecc->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < params->size)
+ jz_nand_correct_data(buf, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+static void jz4740_ecc_disable(struct ingenic_ecc *ecc)
+{
+ u32 reg;
+
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static const struct ingenic_ecc_ops jz4740_ecc_ops = {
+ .disable = jz4740_ecc_disable,
+ .calculate = jz4740_ecc_calculate,
+ .correct = jz4740_ecc_correct,
+};
+
+static const struct of_device_id jz4740_ecc_dt_match[] = {
+ { .compatible = "ingenic,jz4740-ecc", .data = &jz4740_ecc_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_ecc_dt_match);
+
+static struct platform_driver jz4740_ecc_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4740-ecc",
+ .of_match_table = jz4740_ecc_dt_match,
+ },
+};
+module_platform_driver(jz4740_ecc_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4740 ECC controller driver");
+MODULE_LICENSE("GPL v2");
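The JZ4740 Reed-Solomon engine reports error positions as indexes of 9-bit symbols, so symbol i starts at absolute bit 9 * i of the data stream; jz_nand_correct_data() above converts that index into a byte address plus a bit offset and XORs the 9-bit error mask into a 16-bit window. A worked example of that arithmetic, with illustrative values that are not taken from the patch:

/*
 * Reported symbol index = 10, error mask = 0x003:
 *   offset = 10 & 0x7        = 2    bit position inside the 16-bit window
 *   index  = 10 + (10 >> 3)  = 11   first byte of the window in buf[]
 * The window is buf[11] | (buf[12] << 8); bits 2..10 of it hold the 9-bit
 * symbol (absolute bits 90..98, i.e. 9 * 10 onwards), so mask 0x003 flips
 * the symbol's two least significant bits.
 */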
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/ingenic/jz4740_nand.c
index 9526d5b23c80..f759f1672855 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_nand.c
@@ -313,8 +313,11 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
u8 id[2];
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
ret = jz_nand_ioremap_resource(pdev, res_name,
@@ -351,8 +354,8 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
}
/* Update size of the MTD. */
- chip->numchips++;
- mtd->size += chip->chipsize;
+ memorg->ntargets++;
+ mtd->size += nanddev_target_size(&chip->base);
}
dev_info(&pdev->dev, "Found chip %zu on bank %i\n", chipnr, bank);
diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index c5f74ed85862..079266a0d6cf 100644
--- a/drivers/mtd/nand/raw/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -1,28 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * JZ4780 BCH controller
+ * JZ4780 BCH controller driver
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include "jz4780_bch.h"
+#include "ingenic_ecc.h"
#define BCH_BHCR 0x0
#define BCH_BHCCR 0x8
@@ -65,15 +59,8 @@
/* Timeout for BCH calculation/correction. */
#define BCH_TIMEOUT_US 100000
-struct jz4780_bch {
- struct device *dev;
- void __iomem *base;
- struct clk *clk;
- struct mutex lock;
-};
-
-static void jz4780_bch_init(struct jz4780_bch *bch,
- struct jz4780_bch_params *params, bool encode)
+static void jz4780_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool encode)
{
u32 reg;
@@ -93,13 +80,13 @@ static void jz4780_bch_init(struct jz4780_bch *bch,
writel(reg, bch->base + BCH_BHCR);
}
-static void jz4780_bch_disable(struct jz4780_bch *bch)
+static void jz4780_bch_disable(struct ingenic_ecc *bch)
{
writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
}
-static void jz4780_bch_write_data(struct jz4780_bch *bch, const void *buf,
+static void jz4780_bch_write_data(struct ingenic_ecc *bch, const void *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
@@ -116,7 +103,7 @@ static void jz4780_bch_write_data(struct jz4780_bch *bch, const void *buf,
writeb(*src8++, bch->base + BCH_BHDR);
}
-static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
+static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
@@ -146,7 +133,7 @@ static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
}
}
-static bool jz4780_bch_wait_complete(struct jz4780_bch *bch, unsigned int irq,
+static bool jz4780_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
u32 *status)
{
u32 reg;
@@ -170,23 +157,15 @@ static bool jz4780_bch_wait_complete(struct jz4780_bch *bch, unsigned int irq,
return true;
}
-/**
- * jz4780_bch_calculate() - calculate ECC for a data buffer
- * @bch: BCH device.
- * @params: BCH parameters.
- * @buf: input buffer with raw data.
- * @ecc_code: output buffer with ECC.
- *
- * Return: 0 on success, -ETIMEDOUT if timed out while waiting for BCH
- * controller.
- */
-int jz4780_bch_calculate(struct jz4780_bch *bch, struct jz4780_bch_params *params,
- const u8 *buf, u8 *ecc_code)
+static int jz4780_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
{
int ret = 0;
mutex_lock(&bch->lock);
- jz4780_bch_init(bch, params, true);
+
+ jz4780_bch_reset(bch, params, true);
jz4780_bch_write_data(bch, buf, params->size);
if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
@@ -200,30 +179,17 @@ int jz4780_bch_calculate(struct jz4780_bch *bch, struct jz4780_bch_params *param
mutex_unlock(&bch->lock);
return ret;
}
-EXPORT_SYMBOL(jz4780_bch_calculate);
-
-/**
- * jz4780_bch_correct() - detect and correct bit errors
- * @bch: BCH device.
- * @params: BCH parameters.
- * @buf: raw data read from the chip.
- * @ecc_code: ECC read from the chip.
- *
- * Given the raw data and the ECC read from the NAND device, detects and
- * corrects errors in the data.
- *
- * Return: the number of bit errors corrected, -EBADMSG if there are too many
- * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
- */
-int jz4780_bch_correct(struct jz4780_bch *bch, struct jz4780_bch_params *params,
- u8 *buf, u8 *ecc_code)
+
+static int jz4780_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
{
u32 reg, mask, index;
int i, ret, count;
mutex_lock(&bch->lock);
- jz4780_bch_init(bch, params, false);
+ jz4780_bch_reset(bch, params, false);
jz4780_bch_write_data(bch, buf, params->size);
jz4780_bch_write_data(bch, ecc_code, params->bytes);
@@ -262,110 +228,30 @@ out:
mutex_unlock(&bch->lock);
return ret;
}
-EXPORT_SYMBOL(jz4780_bch_correct);
-
-/**
- * jz4780_bch_get() - get the BCH controller device
- * @np: BCH device tree node.
- *
- * Gets the BCH controller device from the specified device tree node. The
- * device must be released with jz4780_bch_release() when it is no longer being
- * used.
- *
- * Return: a pointer to jz4780_bch, errors are encoded into the pointer.
- * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
- */
-static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
-{
- struct platform_device *pdev;
- struct jz4780_bch *bch;
-
- pdev = of_find_device_by_node(np);
- if (!pdev)
- return ERR_PTR(-EPROBE_DEFER);
-
- bch = platform_get_drvdata(pdev);
- if (!bch) {
- put_device(&pdev->dev);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- clk_prepare_enable(bch->clk);
-
- return bch;
-}
-
-/**
- * of_jz4780_bch_get() - get the BCH controller from a DT node
- * @of_node: the node that contains a bch-controller property.
- *
- * Get the bch-controller property from the given device tree
- * node and pass it to jz4780_bch_get to do the work.
- *
- * Return: a pointer to jz4780_bch, errors are encoded into the pointer.
- * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
- */
-struct jz4780_bch *of_jz4780_bch_get(struct device_node *of_node)
-{
- struct jz4780_bch *bch = NULL;
- struct device_node *np;
-
- np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
-
- if (np) {
- bch = jz4780_bch_get(np);
- of_node_put(np);
- }
- return bch;
-}
-EXPORT_SYMBOL(of_jz4780_bch_get);
-
-/**
- * jz4780_bch_release() - release the BCH controller device
- * @bch: BCH device.
- */
-void jz4780_bch_release(struct jz4780_bch *bch)
-{
- clk_disable_unprepare(bch->clk);
- put_device(bch->dev);
-}
-EXPORT_SYMBOL(jz4780_bch_release);
static int jz4780_bch_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct jz4780_bch *bch;
- struct resource *res;
-
- bch = devm_kzalloc(dev, sizeof(*bch), GFP_KERNEL);
- if (!bch)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bch->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(bch->base))
- return PTR_ERR(bch->base);
-
- jz4780_bch_disable(bch);
+ struct ingenic_ecc *bch;
+ int ret;
- bch->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(bch->clk)) {
- dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(bch->clk));
- return PTR_ERR(bch->clk);
- }
+ ret = ingenic_ecc_probe(pdev);
+ if (ret)
+ return ret;
+ bch = platform_get_drvdata(pdev);
clk_set_rate(bch->clk, BCH_CLK_RATE);
- mutex_init(&bch->lock);
-
- bch->dev = dev;
- platform_set_drvdata(pdev, bch);
-
return 0;
}
+static const struct ingenic_ecc_ops jz4780_bch_ops = {
+ .disable = jz4780_bch_disable,
+ .calculate = jz4780_calculate,
+ .correct = jz4780_correct,
+};
+
static const struct of_device_id jz4780_bch_dt_match[] = {
- { .compatible = "ingenic,jz4780-bch" },
+ { .compatible = "ingenic,jz4780-bch", .data = &jz4780_bch_ops },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index fbf6ca015cd7..cba6fe7dd8c4 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -76,6 +76,7 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
/* Core functions */
const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+int nand_bbm_get_next_page(struct nand_chip *chip, int page);
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt);
@@ -110,7 +111,7 @@ static inline int nand_exec_op(struct nand_chip *chip,
if (!nand_has_exec_op(chip))
return -ENOTSUPP;
- if (WARN_ON(op->cs >= chip->numchips))
+ if (WARN_ON(op->cs >= nanddev_ntargets(&chip->base)))
return -EINVAL;
return chip->controller->ops->exec_op(chip, op, false);
diff --git a/drivers/mtd/nand/raw/jz4780_bch.h b/drivers/mtd/nand/raw/jz4780_bch.h
deleted file mode 100644
index bf4718088a3a..000000000000
--- a/drivers/mtd/nand/raw/jz4780_bch.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * JZ4780 BCH controller
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __DRIVERS_MTD_NAND_JZ4780_BCH_H__
-#define __DRIVERS_MTD_NAND_JZ4780_BCH_H__
-
-#include <linux/types.h>
-
-struct device;
-struct device_node;
-struct jz4780_bch;
-
-/**
- * struct jz4780_bch_params - BCH parameters
- * @size: data bytes per ECC step.
- * @bytes: ECC bytes per step.
- * @strength: number of correctable bits per ECC step.
- */
-struct jz4780_bch_params {
- int size;
- int bytes;
- int strength;
-};
-
-int jz4780_bch_calculate(struct jz4780_bch *bch,
- struct jz4780_bch_params *params,
- const u8 *buf, u8 *ecc_code);
-int jz4780_bch_correct(struct jz4780_bch *bch,
- struct jz4780_bch_params *params, u8 *buf,
- u8 *ecc_code);
-
-void jz4780_bch_release(struct jz4780_bch *bch);
-struct jz4780_bch *of_jz4780_bch_get(struct device_node *np);
-
-#endif /* __DRIVERS_MTD_NAND_JZ4780_BCH_H__ */
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
deleted file mode 100644
index 22e58975f0d5..000000000000
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * JZ4780 NAND driver
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/jz4780-nemc.h>
-
-#include "jz4780_bch.h"
-
-#define DRV_NAME "jz4780-nand"
-
-#define OFFSET_DATA 0x00000000
-#define OFFSET_CMD 0x00400000
-#define OFFSET_ADDR 0x00800000
-
-/* Command delay when there is no R/B pin. */
-#define RB_DELAY_US 100
-
-struct jz4780_nand_cs {
- unsigned int bank;
- void __iomem *base;
-};
-
-struct jz4780_nand_controller {
- struct device *dev;
- struct jz4780_bch *bch;
- struct nand_controller controller;
- unsigned int num_banks;
- struct list_head chips;
- int selected;
- struct jz4780_nand_cs cs[];
-};
-
-struct jz4780_nand_chip {
- struct nand_chip chip;
- struct list_head chip_list;
-
- struct gpio_desc *busy_gpio;
- struct gpio_desc *wp_gpio;
- unsigned int reading: 1;
-};
-
-static inline struct jz4780_nand_chip *to_jz4780_nand_chip(struct mtd_info *mtd)
-{
- return container_of(mtd_to_nand(mtd), struct jz4780_nand_chip, chip);
-}
-
-static inline struct jz4780_nand_controller
-*to_jz4780_nand_controller(struct nand_controller *ctrl)
-{
- return container_of(ctrl, struct jz4780_nand_controller, controller);
-}
-
-static void jz4780_nand_select_chip(struct nand_chip *chip, int chipnr)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_nand_cs *cs;
-
- /* Ensure the currently selected chip is deasserted. */
- if (chipnr == -1 && nfc->selected >= 0) {
- cs = &nfc->cs[nfc->selected];
- jz4780_nemc_assert(nfc->dev, cs->bank, false);
- }
-
- nfc->selected = chipnr;
-}
-
-static void jz4780_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_nand_cs *cs;
-
- if (WARN_ON(nfc->selected < 0))
- return;
-
- cs = &nfc->cs[nfc->selected];
-
- jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_ALE)
- writeb(cmd, cs->base + OFFSET_ADDR);
- else if (ctrl & NAND_CLE)
- writeb(cmd, cs->base + OFFSET_CMD);
-}
-
-static int jz4780_nand_dev_ready(struct nand_chip *chip)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
-
- return !gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
-static void jz4780_nand_ecc_hwctl(struct nand_chip *chip, int mode)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
-
- nand->reading = (mode == NAND_ECC_READ);
-}
-
-static int jz4780_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
- u8 *ecc_code)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_bch_params params;
-
- /*
- * Don't need to generate the ECC when reading, BCH does it for us as
- * part of decoding/correction.
- */
- if (nand->reading)
- return 0;
-
- params.size = nand->chip.ecc.size;
- params.bytes = nand->chip.ecc.bytes;
- params.strength = nand->chip.ecc.strength;
-
- return jz4780_bch_calculate(nfc->bch, &params, dat, ecc_code);
-}
-
-static int jz4780_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
-{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
- struct jz4780_bch_params params;
-
- params.size = nand->chip.ecc.size;
- params.bytes = nand->chip.ecc.bytes;
- params.strength = nand->chip.ecc.strength;
-
- return jz4780_bch_correct(nfc->bch, &params, dat, read_ecc);
-}
-
-static int jz4780_nand_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
- int eccbytes;
-
- chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
- (chip->ecc.strength / 8);
-
- switch (chip->ecc.mode) {
- case NAND_ECC_HW:
- if (!nfc->bch) {
- dev_err(nfc->dev,
- "HW BCH selected, but BCH controller not found\n");
- return -ENODEV;
- }
-
- chip->ecc.hwctl = jz4780_nand_ecc_hwctl;
- chip->ecc.calculate = jz4780_nand_ecc_calculate;
- chip->ecc.correct = jz4780_nand_ecc_correct;
- /* fall through */
- case NAND_ECC_SOFT:
- dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
- (nfc->bch) ? "hardware BCH" : "software ECC",
- chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
- break;
- case NAND_ECC_NONE:
- dev_info(nfc->dev, "not using ECC\n");
- break;
- default:
- dev_err(nfc->dev, "ECC mode %d not supported\n",
- chip->ecc.mode);
- return -EINVAL;
- }
-
- /* The NAND core will generate the ECC layout for SW ECC */
- if (chip->ecc.mode != NAND_ECC_HW)
- return 0;
-
- /* Generate ECC layout. ECC codes are right aligned in the OOB area. */
- eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
-
- if (eccbytes > mtd->oobsize - 2) {
- dev_err(nfc->dev,
- "invalid ECC config: required %d ECC bytes, but only %d are available",
- eccbytes, mtd->oobsize - 2);
- return -EINVAL;
- }
-
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-
- return 0;
-}
-
-static const struct nand_controller_ops jz4780_nand_controller_ops = {
- .attach_chip = jz4780_nand_attach_chip,
-};
-
-static int jz4780_nand_init_chip(struct platform_device *pdev,
- struct jz4780_nand_controller *nfc,
- struct device_node *np,
- unsigned int chipnr)
-{
- struct device *dev = &pdev->dev;
- struct jz4780_nand_chip *nand;
- struct jz4780_nand_cs *cs;
- struct resource *res;
- struct nand_chip *chip;
- struct mtd_info *mtd;
- const __be32 *reg;
- int ret = 0;
-
- cs = &nfc->cs[chipnr];
-
- reg = of_get_property(np, "reg", NULL);
- if (!reg)
- return -EINVAL;
-
- cs->bank = be32_to_cpu(*reg);
-
- jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr);
- cs->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(cs->base))
- return PTR_ERR(cs->base);
-
- nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
- if (!nand)
- return -ENOMEM;
-
- nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
-
- if (IS_ERR(nand->busy_gpio)) {
- ret = PTR_ERR(nand->busy_gpio);
- dev_err(dev, "failed to request busy GPIO: %d\n", ret);
- return ret;
- } else if (nand->busy_gpio) {
- nand->chip.legacy.dev_ready = jz4780_nand_dev_ready;
- }
-
- nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
-
- if (IS_ERR(nand->wp_gpio)) {
- ret = PTR_ERR(nand->wp_gpio);
- dev_err(dev, "failed to request WP GPIO: %d\n", ret);
- return ret;
- }
-
- chip = &nand->chip;
- mtd = nand_to_mtd(chip);
- mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
- cs->bank);
- if (!mtd->name)
- return -ENOMEM;
- mtd->dev.parent = dev;
-
- chip->legacy.IO_ADDR_R = cs->base + OFFSET_DATA;
- chip->legacy.IO_ADDR_W = cs->base + OFFSET_DATA;
- chip->legacy.chip_delay = RB_DELAY_US;
- chip->options = NAND_NO_SUBPAGE_WRITE;
- chip->legacy.select_chip = jz4780_nand_select_chip;
- chip->legacy.cmd_ctrl = jz4780_nand_cmd_ctrl;
- chip->ecc.mode = NAND_ECC_HW;
- chip->controller = &nfc->controller;
- nand_set_flash_node(chip, np);
-
- chip->controller->ops = &jz4780_nand_controller_ops;
- ret = nand_scan(chip, 1);
- if (ret)
- return ret;
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret) {
- nand_release(chip);
- return ret;
- }
-
- list_add_tail(&nand->chip_list, &nfc->chips);
-
- return 0;
-}
-
-static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc)
-{
- struct jz4780_nand_chip *chip;
-
- while (!list_empty(&nfc->chips)) {
- chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list);
- nand_release(&chip->chip);
- list_del(&chip->chip_list);
- }
-}
-
-static int jz4780_nand_init_chips(struct jz4780_nand_controller *nfc,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np;
- int i = 0;
- int ret;
- int num_chips = of_get_child_count(dev->of_node);
-
- if (num_chips > nfc->num_banks) {
- dev_err(dev, "found %d chips but only %d banks\n", num_chips, nfc->num_banks);
- return -EINVAL;
- }
-
- for_each_child_of_node(dev->of_node, np) {
- ret = jz4780_nand_init_chip(pdev, nfc, np, i);
- if (ret) {
- jz4780_nand_cleanup_chips(nfc);
- return ret;
- }
-
- i++;
- }
-
- return 0;
-}
-
-static int jz4780_nand_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- unsigned int num_banks;
- struct jz4780_nand_controller *nfc;
- int ret;
-
- num_banks = jz4780_nemc_num_banks(dev);
- if (num_banks == 0) {
- dev_err(dev, "no banks found\n");
- return -ENODEV;
- }
-
- nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
- if (!nfc)
- return -ENOMEM;
-
- /*
- * Check for BCH HW before we call nand_scan_ident, to prevent us from
- * having to call it again if the BCH driver returns -EPROBE_DEFER.
- */
- nfc->bch = of_jz4780_bch_get(dev->of_node);
- if (IS_ERR(nfc->bch))
- return PTR_ERR(nfc->bch);
-
- nfc->dev = dev;
- nfc->num_banks = num_banks;
-
- nand_controller_init(&nfc->controller);
- INIT_LIST_HEAD(&nfc->chips);
-
- ret = jz4780_nand_init_chips(nfc, pdev);
- if (ret) {
- if (nfc->bch)
- jz4780_bch_release(nfc->bch);
- return ret;
- }
-
- platform_set_drvdata(pdev, nfc);
- return 0;
-}
-
-static int jz4780_nand_remove(struct platform_device *pdev)
-{
- struct jz4780_nand_controller *nfc = platform_get_drvdata(pdev);
-
- if (nfc->bch)
- jz4780_bch_release(nfc->bch);
-
- jz4780_nand_cleanup_chips(nfc);
-
- return 0;
-}
-
-static const struct of_device_id jz4780_nand_dt_match[] = {
- { .compatible = "ingenic,jz4780-nand" },
- {},
-};
-MODULE_DEVICE_TABLE(of, jz4780_nand_dt_match);
-
-static struct platform_driver jz4780_nand_driver = {
- .probe = jz4780_nand_probe,
- .remove = jz4780_nand_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(jz4780_nand_dt_match),
- },
-};
-module_platform_driver(jz4780_nand_driver);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index d984538980e2..fc49e13d81ec 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1083,12 +1083,11 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
*/
static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(chip);
marvell_nfc_select_target(chip, chip->cur_cs);
- return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
- chip->oob_poi, true, page);
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
}
/* Hamming write helpers */
@@ -1179,15 +1178,13 @@ static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
- /* Invalidate page cache */
- chip->pagebuf = -1;
-
- memset(chip->data_buf, 0xFF, mtd->writesize);
+ memset(buf, 0xFF, mtd->writesize);
marvell_nfc_select_target(chip, chip->cur_cs);
- return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
- chip->oob_poi, true, page);
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
}
/* BCH read helpers */
@@ -1434,18 +1431,16 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
{
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(chip);
- return chip->ecc.read_page_raw(chip, chip->data_buf, true, page);
+ return chip->ecc.read_page_raw(chip, buf, true, page);
}
static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
{
- /* Invalidate page cache */
- chip->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(chip);
- return chip->ecc.read_page(chip, chip->data_buf, true, page);
+ return chip->ecc.read_page(chip, buf, true, page);
}
/* BCH write helpers */
@@ -1619,25 +1614,21 @@ static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
- /* Invalidate page cache */
- chip->pagebuf = -1;
-
- memset(chip->data_buf, 0xFF, mtd->writesize);
+ memset(buf, 0xFF, mtd->writesize);
- return chip->ecc.write_page_raw(chip, chip->data_buf, true, page);
+ return chip->ecc.write_page_raw(chip, buf, true, page);
}
static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
- /* Invalidate page cache */
- chip->pagebuf = -1;
-
- memset(chip->data_buf, 0xFF, mtd->writesize);
+ memset(buf, 0xFF, mtd->writesize);
- return chip->ecc.write_page(chip, chip->data_buf, true, page);
+ return chip->ecc.write_page(chip, buf, true, page);
}
/* NAND framework ->exec_op() hooks and related helpers */
@@ -2257,9 +2248,9 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
int ret;
if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
- if (chip->ecc_step_ds && chip->ecc_strength_ds) {
- ecc->size = chip->ecc_step_ds;
- ecc->strength = chip->ecc_strength_ds;
+ if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
+ ecc->size = chip->base.eccreq.step_size;
+ ecc->strength = chip->base.eccreq.strength;
} else {
dev_info(nfc->dev,
"No minimum ECC strength, using 1b/512B\n");
@@ -2989,7 +2980,7 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev)
/*
* Reset nfc->selected_chip so the next command will cause the timing
- * registers to be restored in marvell_nfc_select_chip().
+ * registers to be restored in marvell_nfc_select_target().
*/
nfc->selected_chip = NULL;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 3e8aa71407b5..ea57ddcec41e 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -400,7 +400,7 @@ static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms)
cfg |= NFC_RB_IRQ_EN;
writel(cfg, nfc->reg_base + NFC_REG_CFG);
- init_completion(&nfc->completion);
+ reinit_completion(&nfc->completion);
/* use the max erase time as the maximum clock for waiting R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT
@@ -470,15 +470,15 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
return ret;
}
-static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, u8 *databuf,
- int datalen, u8 *infobuf, int infolen,
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
+ int datalen, void *infobuf, int infolen,
enum dma_data_direction dir)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd;
int ret = 0;
- nfc->daddr = dma_map_single(nfc->dev, (void *)databuf, datalen, dir);
+ nfc->daddr = dma_map_single(nfc->dev, databuf, datalen, dir);
ret = dma_mapping_error(nfc->dev, nfc->daddr);
if (ret) {
dev_err(nfc->dev, "DMA mapping error\n");
@@ -528,10 +528,13 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
u8 *info;
info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
PER_INFO_BYTE, DMA_FROM_DEVICE);
if (ret)
- return ret;
+ goto out;
cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
@@ -539,6 +542,8 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
meson_nfc_drain_cmd(nfc);
meson_nfc_wait_cmd_finish(nfc, 1000);
meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+
+out:
kfree(info);
return ret;
@@ -640,7 +645,7 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand,
return ret;
ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
- data_len, (u8 *)meson_chip->info_buf,
+ data_len, meson_chip->info_buf,
info_len, DMA_TO_DEVICE);
if (ret)
return ret;
@@ -724,7 +729,7 @@ static int meson_nfc_read_page_sub(struct nand_chip *nand,
return ret;
ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
- data_len, (u8 *)meson_chip->info_buf,
+ data_len, meson_chip->info_buf,
info_len, DMA_FROM_DEVICE);
if (ret)
return ret;
@@ -1183,6 +1188,8 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
return -EINVAL;
}
+ mtd_set_ooblayout(mtd, &meson_ooblayout_ops);
+
ret = meson_nand_bch_mode(nand);
if (ret)
return -EINVAL;
@@ -1226,17 +1233,13 @@ meson_nfc_nand_chip_init(struct device *dev,
int ret, i;
u32 tmp, nsels;
- if (!of_get_property(np, "reg", &nsels))
- return -EINVAL;
-
- nsels /= sizeof(u32);
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (!nsels || nsels > MAX_CE_NUM) {
dev_err(dev, "invalid register property size\n");
return -EINVAL;
}
- meson_chip = devm_kzalloc(dev,
- sizeof(*meson_chip) + (nsels * sizeof(u8)),
+ meson_chip = devm_kzalloc(dev, struct_size(meson_chip, sels, nsels),
GFP_KERNEL);
if (!meson_chip)
return -ENOMEM;
@@ -1377,6 +1380,7 @@ static int meson_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->completion);
nfc->dev = dev;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 2c0e09187773..b17619f30b1b 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1197,8 +1197,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
/* if optional dt settings not present */
if (!nand->ecc.size || !nand->ecc.strength) {
/* use datasheet requirements */
- nand->ecc.strength = nand->ecc_strength_ds;
- nand->ecc.size = nand->ecc_step_ds;
+ nand->ecc.strength = nand->base.eccreq.strength;
+ nand->ecc.size = nand->base.eccreq.step_size;
/*
* align eccstrength and eccsize
diff --git a/drivers/mtd/nand/raw/nand_amd.c b/drivers/mtd/nand/raw/nand_amd.c
index 890c5b43e03c..6217555c19a6 100644
--- a/drivers/mtd/nand/raw/nand_amd.c
+++ b/drivers/mtd/nand/raw/nand_amd.c
@@ -20,6 +20,9 @@
static void amd_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
nand_decode_ext_id(chip);
@@ -31,16 +34,24 @@ static void amd_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
- mtd->writesize == 512) {
- mtd->erasesize = 128 * 1024;
- mtd->erasesize <<= ((chip->id.data[3] & 0x03) << 1);
+ memorg->pagesize == 512) {
+ memorg->pages_per_eraseblock = 256;
+ memorg->pages_per_eraseblock <<= ((chip->id.data[3] & 0x03) << 1);
+ mtd->erasesize = memorg->pages_per_eraseblock *
+ memorg->pagesize;
}
}
static int amd_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ /*
+ * According to the datasheet of some Cypress SLC NANDs,
+ * the bad block markers can be in the first, second or last
+ * page of a block. So let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index ddd396e93e32..2cf71060d6f8 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -240,10 +240,10 @@ static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
/*
- * cs should always lie between 0 and chip->numchips, when that's not
- * the case it's a bug and the caller should be fixed.
+ * cs should always lie between 0 and nanddev_ntargets(), when that's
+ * not the case it's a bug and the caller should be fixed.
*/
- if (WARN_ON(cs > chip->numchips))
+ if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
return;
chip->cur_cs = cs;
@@ -283,6 +283,31 @@ static void nand_release_device(struct nand_chip *chip)
}
/**
+ * nand_bbm_get_next_page - Get the next page for bad block markers
+ * @chip: NAND chip object
+ * @page: First page to start checking for bad block marker usage
+ *
+ * Returns an integer that corresponds to the page offset within a block, for
+ * a page that is used to store bad block markers. If no more pages are
+ * available, -EINVAL is returned.
+ */
+int nand_bbm_get_next_page(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int last_page = ((mtd->erasesize - mtd->writesize) >>
+ chip->page_shift) & chip->pagemask;
+
+ if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
+ return 0;
+ else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ return 1;
+ else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ return last_page;
+
+ return -EINVAL;
+}
+
+/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @chip: NAND chip object
* @ofs: offset from device start
@@ -291,18 +316,15 @@ static void nand_release_device(struct nand_chip *chip)
*/
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int page, page_end, res;
+ int first_page, page_offset;
+ int res;
u8 bad;
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += mtd->erasesize - mtd->writesize;
-
- page = (int)(ofs >> chip->page_shift) & chip->pagemask;
- page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
+ first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ page_offset = nand_bbm_get_next_page(chip, 0);
- for (; page < page_end; page++) {
- res = chip->ecc.read_oob(chip, page);
+ while (page_offset >= 0) {
+ res = chip->ecc.read_oob(chip, first_page + page_offset);
if (res < 0)
return res;
@@ -314,6 +336,8 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
res = hweight8(bad) < chip->badblockbits;
if (res)
return res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
}
return 0;
@@ -459,8 +483,8 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
}
/* Invalidate the page cache, if we write to the cached page */
- if (page == chip->pagebuf)
- chip->pagebuf = -1;
+ if (page == chip->pagecache.page)
+ chip->pagecache.page = -1;
nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
@@ -493,7 +517,7 @@ static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops;
uint8_t buf[2] = { 0, 0 };
- int ret = 0, res, i = 0;
+ int ret = 0, res, page_offset;
memset(&ops, 0, sizeof(ops));
ops.oobbuf = buf;
@@ -506,17 +530,18 @@ static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
}
ops.mode = MTD_OPS_PLACE_OOB;
- /* Write to first/last page(s) if necessary */
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += mtd->erasesize - mtd->writesize;
- do {
- res = nand_do_write_oob(chip, ofs, &ops);
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = nand_do_write_oob(chip,
+ ofs + (page_offset * mtd->writesize),
+ &ops);
+
if (!ret)
ret = res;
- i++;
- ofs += mtd->writesize;
- } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
return ret;
}
@@ -3173,7 +3198,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
use_bufpoi = 0;
/* Is the current page in the buffer? */
- if (realpage != chip->pagebuf || oob) {
+ if (realpage != chip->pagecache.page || oob) {
bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
@@ -3199,7 +3224,7 @@ read_retry:
if (ret < 0) {
if (use_bufpoi)
/* Invalidate page cache */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
break;
}
@@ -3208,11 +3233,11 @@ read_retry:
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - ecc_failures) &&
(ops->mode != MTD_OPS_RAW)) {
- chip->pagebuf = realpage;
- chip->pagebuf_bitflips = ret;
+ chip->pagecache.page = realpage;
+ chip->pagecache.bitflips = ret;
} else {
/* Invalidate page cache */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
}
memcpy(buf, chip->data_buf + col, bytes);
}
@@ -3252,7 +3277,7 @@ read_retry:
memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
- chip->pagebuf_bitflips);
+ chip->pagecache.bitflips);
}
readlen -= bytes;
@@ -3973,9 +3998,9 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
page = realpage & chip->pagemask;
/* Invalidate the page cache, when we write to the cached page */
- if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
- ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
- chip->pagebuf = -1;
+ if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
+ ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
+ chip->pagecache.page = -1;
/* Don't allow multipage oob writes with offset */
if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
@@ -4004,10 +4029,9 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
__func__, buf);
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
- chip->pagebuf = -1;
- memset(chip->data_buf, 0xff, mtd->writesize);
- memcpy(&chip->data_buf[column], buf, bytes);
- wbuf = chip->data_buf;
+ wbuf = nand_get_data_buf(chip);
+ memset(wbuf, 0xff, mtd->writesize);
+ memcpy(&wbuf[column], buf, bytes);
}
if (unlikely(oob)) {
@@ -4197,9 +4221,9 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
* Invalidate the page cache, if we erase the block which
* contains the current cached page.
*/
- if (page <= chip->pagebuf && chip->pagebuf <
+ if (page <= chip->pagecache.page && chip->pagecache.page <
(page + pages_per_block))
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
ret = nand_erase_op(chip, (page & chip->pagemask) >>
(chip->phys_erase_shift - chip->page_shift));
@@ -4299,42 +4323,6 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/**
- * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- * @len: length of mtd
- */
-static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- u32 part_start_block;
- u32 part_end_block;
- u32 part_start_die;
- u32 part_end_die;
-
- /*
- * max_bb_per_die and blocks_per_die used to determine
- * the maximum bad block count.
- */
- if (!chip->max_bb_per_die || !chip->blocks_per_die)
- return -ENOTSUPP;
-
- /* Get the start and end of the partition in erase blocks. */
- part_start_block = mtd_div_by_eb(ofs, mtd);
- part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
-
- /* Get the start and end LUNs of the partition. */
- part_start_die = part_start_block / chip->blocks_per_die;
- part_end_die = part_end_block / chip->blocks_per_die;
-
- /*
- * Look up the bad blocks per unit and multiply by the number of units
- * that the partition spans.
- */
- return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
-}
-
-/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
*/
@@ -4485,21 +4473,29 @@ static int nand_get_bits_per_cell(u8 cellinfo)
*/
void nand_decode_ext_id(struct nand_chip *chip)
{
+ struct nand_memory_organization *memorg;
struct mtd_info *mtd = nand_to_mtd(chip);
int extid;
u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
/* The 3rd id byte holds MLC / multichip data */
- chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* The 4th id byte is the important one */
extid = id_data[3];
/* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x03);
+ memorg->pagesize = 1024 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
extid >>= 2;
/* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ mtd->oobsize = memorg->oobsize;
extid >>= 2;
/* Calc blocksize. Blocksize is multiples of 64KiB */
+ memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
+ memorg->pagesize;
mtd->erasesize = (64 * 1024) << (extid & 0x03);
extid >>= 2;
/* Get buswidth information */
@@ -4516,13 +4512,19 @@ EXPORT_SYMBOL_GPL(nand_decode_ext_id);
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
mtd->erasesize = type->erasesize;
- mtd->writesize = type->pagesize;
- mtd->oobsize = mtd->writesize / 32;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->oobsize = memorg->pagesize / 32;
+ mtd->oobsize = memorg->oobsize;
/* All legacy ID NAND are small-page, SLC */
- chip->bits_per_cell = 1;
+ memorg->bits_per_cell = 1;
}
/*
@@ -4536,9 +4538,9 @@ static void nand_decode_bbm_options(struct nand_chip *chip)
/* Set the bad block position */
if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
- chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ chip->badblockpos = NAND_BBM_POS_LARGE;
else
- chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+ chip->badblockpos = NAND_BBM_POS_SMALL;
}
static inline bool is_full_id_nand(struct nand_flash_dev *type)
@@ -4550,18 +4552,28 @@ static bool find_full_id_nand(struct nand_chip *chip,
struct nand_flash_dev *type)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
u8 *id_data = chip->id.data;
+ memorg = nanddev_get_memorg(&chip->base);
+
if (!strncmp(type->id, id_data, type->id_len)) {
- mtd->writesize = type->pagesize;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->pages_per_eraseblock = type->erasesize /
+ type->pagesize;
mtd->erasesize = type->erasesize;
- mtd->oobsize = type->oobsize;
-
- chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
- chip->chipsize = (uint64_t)type->chipsize << 20;
+ memorg->oobsize = type->oobsize;
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
chip->options |= type->options;
- chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
- chip->ecc_step_ds = NAND_ECC_STEP(type);
+ chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
+ chip->base.eccreq.step_size = NAND_ECC_STEP(type);
chip->onfi_timing_mode_default =
type->onfi_timing_mode_default;
@@ -4587,8 +4599,12 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
*/
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
chip->manufacturer.desc->ops->detect) {
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
/* The 3rd id byte holds MLC / multichip data */
- chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
chip->manufacturer.desc->ops->detect(chip);
} else {
nand_decode_ext_id(chip);
@@ -4637,9 +4653,20 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
+ u64 targetsize;
+
+ /*
+ * Let's start by initializing memorg fields that might be left
+ * unassigned by the ID-based detection logic.
+ */
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->planes_per_lun = 1;
+ memorg->luns_per_target = 1;
+ memorg->ntargets = 1;
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
@@ -4735,8 +4762,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
if (!chip->parameters.model)
return -ENOMEM;
- chip->chipsize = (uint64_t)type->chipsize << 20;
-
if (!type->pagesize)
nand_manufacturer_detect(chip);
else
@@ -4745,6 +4770,11 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
/* Get chip options */
chip->options |= type->options;
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+
ident_done:
if (!mtd->name)
mtd->name = chip->parameters.model;
@@ -4773,14 +4803,15 @@ ident_done:
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
/* Convert chipsize to number of pages per chip -1 */
- chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ targetsize = nanddev_target_size(&chip->base);
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
chip->bbt_erase_shift = chip->phys_erase_shift =
ffs(mtd->erasesize) - 1;
- if (chip->chipsize & 0xffffffff)
- chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ if (targetsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)targetsize) - 1;
else {
- chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+ chip->chip_shift = ffs((unsigned)(targetsize >> 32));
chip->chip_shift += 32 - 1;
}
@@ -4796,7 +4827,7 @@ ident_done:
pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
chip->parameters.model);
pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
- (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return 0;
@@ -4971,10 +5002,13 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
struct nand_flash_dev *table)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
int nand_maf_id, nand_dev_id;
unsigned int i;
int ret;
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Assume all dies are deselected when we enter nand_scan_ident(). */
chip->cur_cs = -1;
@@ -4990,12 +5024,6 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- /*
- * Start with chips->numchips = maxchips to let nand_select_target() do
- * its job. chip->numchips will be adjusted after.
- */
- chip->numchips = maxchips;
-
/* Set the default functions */
nand_set_defaults(chip);
@@ -5042,8 +5070,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
pr_info("%d chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
- chip->numchips = i;
- mtd->size = i * chip->chipsize;
+ memorg->ntargets = i;
+ mtd->size = i * nanddev_target_size(&chip->base);
return 0;
}
@@ -5078,13 +5106,13 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
ecc->bytes = 3;
ecc->strength = 1;
- if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
+ if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
return 0;
case NAND_ECC_BCH:
if (!mtd_nand_has_bch()) {
- WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return -EINVAL;
}
ecc->calculate = nand_bch_calculate_ecc;
@@ -5224,8 +5252,8 @@ nand_match_ecc_req(struct nand_chip *chip,
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
- int req_step = chip->ecc_step_ds;
- int req_strength = chip->ecc_strength_ds;
+ int req_step = chip->base.eccreq.step_size;
+ int req_strength = chip->base.eccreq.strength;
int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
int best_step, best_strength, best_ecc_bytes;
int best_ecc_bytes_total = INT_MAX;
@@ -5418,7 +5446,7 @@ static bool nand_ecc_strength_good(struct nand_chip *chip)
struct nand_ecc_ctrl *ecc = &chip->ecc;
int corr, ds_corr;
- if (ecc->size == 0 || chip->ecc_step_ds == 0)
+ if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
/* Not enough information */
return true;
@@ -5427,11 +5455,56 @@ static bool nand_ecc_strength_good(struct nand_chip *chip)
* the correction density.
*/
corr = (mtd->writesize * ecc->strength) / ecc->size;
- ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+ ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
+ chip->base.eccreq.step_size;
- return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+ return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
}
+static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ unsigned int eb = nanddev_pos_to_row(nand, pos);
+ int ret;
+
+ eb >>= nand->rowconv.eraseblock_addr_shift;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_erase_op(chip, eb);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static int rawnand_markbad(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+
+ return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+}
+
+static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ int ret;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static const struct nand_ops rawnand_ops = {
+ .erase = rawnand_erase,
+ .markbad = rawnand_markbad,
+ .isbad = rawnand_isbad,
+};
+
/**
* nand_scan_tail - Scan for the NAND device
* @chip: NAND chip object
@@ -5687,7 +5760,7 @@ static int nand_scan_tail(struct nand_chip *chip)
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
/* Invalidate the pagebuffer reference */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
switch (ecc->mode) {
@@ -5700,10 +5773,15 @@ static int nand_scan_tail(struct nand_chip *chip)
break;
}
+ ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
+ if (chip->options & NAND_ROM)
+ mtd->flags = MTD_CAP_ROM;
+
/* Fill in remaining MTD driver data */
- mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
- mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
- MTD_CAP_NANDFLASH;
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
@@ -5719,8 +5797,7 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->_block_isreserved = nand_block_isreserved;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
- mtd->_max_bad_blocks = nand_max_bad_blocks;
- mtd->writebufsize = mtd->writesize;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
/*
* Initialize bitflip_threshold to its default prior scan_bbt() call.
@@ -5733,13 +5810,13 @@ static int nand_scan_tail(struct nand_chip *chip)
/* Initialize the ->data_interface field. */
ret = nand_init_data_interface(chip);
if (ret)
- goto err_nand_manuf_cleanup;
+ goto err_nanddev_cleanup;
/* Enter fastest possible mode on all dies. */
- for (i = 0; i < chip->numchips; i++) {
+ for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
ret = nand_setup_data_interface(chip, i);
if (ret)
- goto err_nand_manuf_cleanup;
+ goto err_nanddev_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -5749,11 +5826,14 @@ static int nand_scan_tail(struct nand_chip *chip)
/* Build bad block table */
ret = nand_create_bbt(chip);
if (ret)
- goto err_nand_manuf_cleanup;
+ goto err_nanddev_cleanup;
return 0;
+err_nanddev_cleanup:
+ nanddev_cleanup(&chip->base);
+
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
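In the nand_base.c hunks above, type->chipsize for legacy and full-ID entries is given in MiB, so the new eraseblocks_per_lun value is simply the per-target size divided by the eraseblock size. A worked example with illustrative numbers; the nanddev_target_size() formula below is assumed from include/linux/mtd/nand.h rather than shown in this patch:

/*
 * type->chipsize = 2048 (MiB), pagesize = 2048 B, pages_per_eraseblock = 64:
 *   eraseblock size     = 2048 * 64                  = 128 KiB
 *   eraseblocks_per_lun = (2048 << 20) / (128 << 10) = 16384
 * With luns_per_target = 1, nanddev_target_size() =
 *   pagesize * pages_per_eraseblock * eraseblocks_per_lun * luns_per_target
 *   = 2048 * 64 * 16384 * 1 = 2 GiB, matching the old chip->chipsize.
 */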
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 19a2b563acdf..fd3c10216eda 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -264,18 +264,19 @@ static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, int chip)
{
struct mtd_info *mtd = nand_to_mtd(this);
+ u64 targetsize = nanddev_target_size(&this->base);
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
- for (i = 0; i < this->numchips; i++) {
+ for (i = 0; i < nanddev_ntargets(&this->base); i++) {
if (chip == -1 || chip == i)
res = read_bbt(this, buf, td->pages[i],
- this->chipsize >> this->bbt_erase_shift,
+ targetsize >> this->bbt_erase_shift,
td, offs);
if (res)
return res;
- offs += this->chipsize >> this->bbt_erase_shift;
+ offs += targetsize >> this->bbt_erase_shift;
}
} else {
res = read_bbt(this, buf, td->pages[0],
@@ -415,11 +416,12 @@ static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
/* Scan a given block partially */
static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, int numpages)
+ loff_t offs, uint8_t *buf)
{
struct mtd_info *mtd = nand_to_mtd(this);
+
struct mtd_oob_ops ops;
- int j, ret;
+ int ret, page_offset;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
@@ -427,12 +429,15 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
- for (j = 0; j < numpages; j++) {
+ page_offset = nand_bbm_get_next_page(this, 0);
+
+ while (page_offset >= 0) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd_read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs + (page_offset * mtd->writesize),
+ &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
@@ -440,8 +445,9 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
if (check_short_pattern(buf, bd))
return 1;
- offs += mtd->writesize;
+ page_offset = nand_bbm_get_next_page(this, page_offset + 1);
}
+
return 0;
}
@@ -459,43 +465,35 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
static int create_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
- int i, numblocks, numpages;
- int startblock;
+ int i, numblocks, startblock;
loff_t from;
pr_info("Scanning device for bad blocks\n");
- if (bd->options & NAND_BBT_SCAN2NDPAGE)
- numpages = 2;
- else
- numpages = 1;
-
if (chip == -1) {
numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
from = 0;
} else {
- if (chip >= this->numchips) {
+ if (chip >= nanddev_ntargets(&this->base)) {
pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
- chip + 1, this->numchips);
+ chip + 1, nanddev_ntargets(&this->base));
return -EINVAL;
}
- numblocks = this->chipsize >> this->bbt_erase_shift;
+ numblocks = targetsize >> this->bbt_erase_shift;
startblock = chip * numblocks;
numblocks += startblock;
from = (loff_t)startblock << this->bbt_erase_shift;
}
- if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
- from += mtd->erasesize - (mtd->writesize * numpages);
-
for (i = startblock; i < numblocks; i++) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- ret = scan_block_fast(this, bd, from, buf, numpages);
+ ret = scan_block_fast(this, bd, from, buf);
if (ret < 0)
return ret;
@@ -529,6 +527,7 @@ static int create_bbt(struct nand_chip *this, uint8_t *buf,
static int search_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, chips;
int startblock, block, dir;
@@ -547,8 +546,8 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ chips = nanddev_ntargets(&this->base);
+ bbtblocks = targetsize >> this->bbt_erase_shift;
startblock &= bbtblocks - 1;
} else {
chips = 1;
@@ -576,7 +575,7 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
break;
}
}
- startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += targetsize >> this->bbt_erase_shift;
}
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
@@ -626,6 +625,7 @@ static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
struct nand_bbt_descr *md, int chip)
{
+ u64 targetsize = nanddev_target_size(&this->base);
int startblock, dir, page, numblocks, i;
/*
@@ -637,9 +637,9 @@ static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
return td->pages[chip] >>
(this->bbt_erase_shift - this->page_shift);
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
if (!(td->options & NAND_BBT_PERCHIP))
- numblocks *= this->numchips;
+ numblocks *= nanddev_ntargets(&this->base);
/*
* Automatic placement of the bad block table. Search direction
@@ -717,6 +717,7 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
struct erase_info einfo;
int i, res, chip = 0;
@@ -737,10 +738,10 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
rcode = 0xff;
/* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
/* Full device write or specific chip? */
if (chipsel == -1) {
- nrchips = this->numchips;
+ nrchips = nanddev_ntargets(&this->base);
} else {
nrchips = chipsel + 1;
chip = chipsel;
@@ -901,7 +902,9 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
static inline int nand_memory_bbt(struct nand_chip *this,
struct nand_bbt_descr *bd)
{
- return create_bbt(this, this->data_buf, bd, -1);
+ u8 *pagebuf = nand_get_data_buf(this);
+
+ return create_bbt(this, pagebuf, bd, -1);
}
/**
@@ -925,7 +928,7 @@ static int check_create(struct nand_chip *this, uint8_t *buf,
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
- chips = this->numchips;
+ chips = nanddev_ntargets(&this->base);
else
chips = 1;
@@ -1097,14 +1100,15 @@ static int nand_update_bbt(struct nand_chip *this, loff_t offs)
*/
static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, j, chips, block, nrblocks, update;
uint8_t oldval;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ chips = nanddev_ntargets(&this->base);
+ nrblocks = (int)(targetsize >> this->bbt_erase_shift);
} else {
chips = 1;
nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
@@ -1157,6 +1161,7 @@ static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
*/
static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
u32 pattern_len;
u32 bits;
@@ -1185,7 +1190,7 @@ static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
}
if (bd->options & NAND_BBT_PERCHIP)
- table_size = this->chipsize >> this->bbt_erase_shift;
+ table_size = targetsize >> this->bbt_erase_shift;
else
table_size = mtd->size >> this->bbt_erase_shift;
table_size >>= 3;
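
Instead of a fixed one- or two-page scan driven by NAND_BBT_SCAN2NDPAGE/SCANLASTPAGE, scan_block_fast() above now asks nand_bbm_get_next_page() for each page of a block that may carry a factory bad-block marker. The userspace model below approximates that iterator under first/second/last-page flags; the flag values and helper name are made up for illustration and do not match the kernel definitions.

/*
 * Simplified model of the marker-page iteration: return the next
 * candidate page at or after 'page', or -1 when there is none left.
 */
#include <stdio.h>

#define BBM_FIRSTPAGE   0x1
#define BBM_SECONDPAGE  0x2
#define BBM_LASTPAGE    0x4

static int bbm_get_next_page(unsigned int options, unsigned int last_page,
                             int page)
{
    if ((options & BBM_FIRSTPAGE) && page <= 0)
        return 0;
    if ((options & BBM_SECONDPAGE) && page <= 1)
        return 1;
    if ((options & BBM_LASTPAGE) && page <= (int)last_page)
        return last_page;
    return -1;
}

int main(void)
{
    unsigned int options = BBM_FIRSTPAGE | BBM_SECONDPAGE | BBM_LASTPAGE;
    unsigned int last_page = 63;        /* 64 pages per block */
    int page = bbm_get_next_page(options, last_page, 0);

    /* Mirrors the while (page_offset >= 0) loop in scan_block_fast() */
    while (page >= 0) {
        printf("check OOB of page %d\n", page);
        page = bbm_get_next_page(options, last_page, page + 1);
    }
    return 0;
}
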
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
index 96f039a83bc8..3338c68aaaf1 100644
--- a/drivers/mtd/nand/raw/nand_esmt.c
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -14,20 +14,20 @@ static void esmt_nand_decode_id(struct nand_chip *chip)
/* Extract ECC requirements from 5th id byte. */
if (chip->id.len >= 5 && nand_is_slc(chip)) {
- chip->ecc_step_ds = 512;
+ chip->base.eccreq.step_size = 512;
switch (chip->id.data[4] & 0x3) {
case 0x0:
- chip->ecc_strength_ds = 4;
+ chip->base.eccreq.strength = 4;
break;
case 0x1:
- chip->ecc_strength_ds = 2;
+ chip->base.eccreq.strength = 2;
break;
case 0x2:
- chip->ecc_strength_ds = 1;
+ chip->base.eccreq.strength = 1;
break;
default:
WARN(1, "Could not get ECC info");
- chip->ecc_step_ds = 0;
+ chip->base.eccreq.step_size = 0;
break;
}
}
@@ -36,7 +36,14 @@ static void esmt_nand_decode_id(struct nand_chip *chip)
static int esmt_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ /*
+ * It is known that some ESMT SLC NANDs have been shipped
+ * with the factory bad block markers in the first or last page
+ * of the block, instead of the first or second page. To be on
+ * the safe side, let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 343f477362d1..7c600c4d5ec8 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -418,24 +418,27 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
bool valid_jedecid)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
u8 oobsize;
+ memorg = nanddev_get_memorg(&chip->base);
+
oobsize = ((chip->id.data[3] >> 2) & 0x3) |
((chip->id.data[3] >> 4) & 0x4);
if (valid_jedecid) {
switch (oobsize) {
case 0:
- mtd->oobsize = 2048;
+ memorg->oobsize = 2048;
break;
case 1:
- mtd->oobsize = 1664;
+ memorg->oobsize = 1664;
break;
case 2:
- mtd->oobsize = 1024;
+ memorg->oobsize = 1024;
break;
case 3:
- mtd->oobsize = 640;
+ memorg->oobsize = 640;
break;
default:
/*
@@ -450,25 +453,25 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
} else {
switch (oobsize) {
case 0:
- mtd->oobsize = 128;
+ memorg->oobsize = 128;
break;
case 1:
- mtd->oobsize = 224;
+ memorg->oobsize = 224;
break;
case 2:
- mtd->oobsize = 448;
+ memorg->oobsize = 448;
break;
case 3:
- mtd->oobsize = 64;
+ memorg->oobsize = 64;
break;
case 4:
- mtd->oobsize = 32;
+ memorg->oobsize = 32;
break;
case 5:
- mtd->oobsize = 16;
+ memorg->oobsize = 16;
break;
case 6:
- mtd->oobsize = 640;
+ memorg->oobsize = 640;
break;
default:
/*
@@ -492,8 +495,10 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
* the actual OOB size for this chip is: 640 * 16k / 8k).
*/
if (chip->id.data[1] == 0xde)
- mtd->oobsize *= mtd->writesize / SZ_8K;
+ memorg->oobsize *= memorg->pagesize / SZ_8K;
}
+
+ mtd->oobsize = memorg->oobsize;
}
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
@@ -503,30 +508,30 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
if (valid_jedecid) {
/* Reference: H27UCG8T2E datasheet */
- chip->ecc_step_ds = 1024;
+ chip->base.eccreq.step_size = 1024;
switch (ecc_level) {
case 0:
- chip->ecc_step_ds = 0;
- chip->ecc_strength_ds = 0;
+ chip->base.eccreq.step_size = 0;
+ chip->base.eccreq.strength = 0;
break;
case 1:
- chip->ecc_strength_ds = 4;
+ chip->base.eccreq.strength = 4;
break;
case 2:
- chip->ecc_strength_ds = 24;
+ chip->base.eccreq.strength = 24;
break;
case 3:
- chip->ecc_strength_ds = 32;
+ chip->base.eccreq.strength = 32;
break;
case 4:
- chip->ecc_strength_ds = 40;
+ chip->base.eccreq.strength = 40;
break;
case 5:
- chip->ecc_strength_ds = 50;
+ chip->base.eccreq.strength = 50;
break;
case 6:
- chip->ecc_strength_ds = 60;
+ chip->base.eccreq.strength = 60;
break;
default:
/*
@@ -547,14 +552,14 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
if (nand_tech < 3) {
/* > 26nm, reference: H27UBG8T2A datasheet */
if (ecc_level < 5) {
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1 << ecc_level;
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << ecc_level;
} else if (ecc_level < 7) {
if (ecc_level == 5)
- chip->ecc_step_ds = 2048;
+ chip->base.eccreq.step_size = 2048;
else
- chip->ecc_step_ds = 1024;
- chip->ecc_strength_ds = 24;
+ chip->base.eccreq.step_size = 1024;
+ chip->base.eccreq.strength = 24;
} else {
/*
* We should never reach this case, but if that
@@ -567,14 +572,14 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
} else {
/* <= 26nm, reference: H27UBG8T2B datasheet */
if (!ecc_level) {
- chip->ecc_step_ds = 0;
- chip->ecc_strength_ds = 0;
+ chip->base.eccreq.step_size = 0;
+ chip->base.eccreq.strength = 0;
} else if (ecc_level < 5) {
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1 << (ecc_level - 1);
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << (ecc_level - 1);
} else {
- chip->ecc_step_ds = 1024;
- chip->ecc_strength_ds = 24 +
+ chip->base.eccreq.step_size = 1024;
+ chip->base.eccreq.strength = 24 +
(8 * (ecc_level - 5));
}
}
@@ -587,7 +592,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
u8 nand_tech;
/* We need scrambling on all TLC NANDs*/
- if (chip->bits_per_cell > 2)
+ if (nanddev_bits_per_cell(&chip->base) > 2)
chip->options |= NAND_NEED_SCRAMBLING;
/* And on MLC NANDs with sub-3xnm process */
@@ -609,9 +614,12 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
static void hynix_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
bool valid_jedecid;
u8 tmp;
+ memorg = nanddev_get_memorg(&chip->base);
+
/*
* Exclude all SLC NANDs from this advanced detection scheme.
* According to the ranges defined in several datasheets, it might
@@ -625,7 +633,8 @@ static void hynix_nand_decode_id(struct nand_chip *chip)
}
/* Extract pagesize */
- mtd->writesize = 2048 << (chip->id.data[3] & 0x03);
+ memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
+ mtd->writesize = memorg->pagesize;
tmp = (chip->id.data[3] >> 4) & 0x3;
/*
@@ -635,12 +644,19 @@ static void hynix_nand_decode_id(struct nand_chip *chip)
* The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
* this case the erasesize is set to 768KiB.
*/
- if (chip->id.data[3] & 0x80)
+ if (chip->id.data[3] & 0x80) {
+ memorg->pages_per_eraseblock = (SZ_1M << tmp) /
+ memorg->pagesize;
mtd->erasesize = SZ_1M << tmp;
- else if (tmp == 3)
+ } else if (tmp == 3) {
+ memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
+ memorg->pagesize;
mtd->erasesize = SZ_512K + SZ_256K;
- else
+ } else {
+ memorg->pages_per_eraseblock = (SZ_128K << tmp) /
+ memorg->pagesize;
mtd->erasesize = SZ_128K << tmp;
+ }
/*
* Modern Toggle DDR NANDs have a valid JEDECID even though they are
@@ -672,9 +688,9 @@ static int hynix_nand_init(struct nand_chip *chip)
int ret;
if (!nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ chip->options |= NAND_BBM_LASTPAGE;
else
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
if (!hynix)
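
The Hynix decode above now fills memorg->pages_per_eraseblock as the eraseblock size divided by the page size for each of the three block-size encodings, keeping mtd->erasesize in sync with the memory organization. A small userspace walk-through of that arithmetic, with an example ID byte rather than one read from hardware:

#include <stdio.h>

#define SZ_128K (128 * 1024)
#define SZ_256K (256 * 1024)
#define SZ_512K (512 * 1024)
#define SZ_1M   (1024 * 1024)

int main(void)
{
    unsigned char id3 = 0x25;           /* example ID byte 3 */
    unsigned int pagesize = 2048 << (id3 & 0x03);
    unsigned int tmp = (id3 >> 4) & 0x3;
    unsigned int erasesize, pages_per_eraseblock;

    if (id3 & 0x80)
        erasesize = SZ_1M << tmp;
    else if (tmp == 3)
        erasesize = SZ_512K + SZ_256K;
    else
        erasesize = SZ_128K << tmp;

    pages_per_eraseblock = erasesize / pagesize;
    printf("pagesize=%u erasesize=%u pages/block=%u\n",
           pagesize, erasesize, pages_per_eraseblock);
    return 0;
}
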
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 38b5dc22cb30..9b540e76f84f 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -22,12 +22,15 @@
int nand_jedec_detect(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
struct nand_jedec_params *p;
struct jedec_ecc_info *ecc;
int jedec_version = 0;
char id[5];
int i, val, ret;
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Try JEDEC for unknown chip or LP */
ret = nand_readid_op(chip, 0x40, id, sizeof(id));
if (ret || strncmp(id, "JEDEC", sizeof(id)))
@@ -81,18 +84,24 @@ int nand_jedec_detect(struct nand_chip *chip)
goto free_jedec_param_page;
}
- mtd->writesize = le32_to_cpu(p->byte_per_page);
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
/* Please reference to the comment for nand_flash_detect_onfi. */
- mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
- mtd->erasesize *= mtd->writesize;
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->multi_plane_addr;
/* Please reference to the comment for nand_flash_detect_onfi. */
- chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->bits_per_cell = p->bits_per_cell;
if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
chip->options |= NAND_BUSWIDTH_16;
@@ -101,8 +110,8 @@ int nand_jedec_detect(struct nand_chip *chip)
ecc = &p->ecc_info[0];
if (ecc->codeword_size >= 9) {
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
+ chip->base.eccreq.strength = ecc->ecc_bits;
+ chip->base.eccreq.step_size = 1 << ecc->codeword_size;
} else {
pr_warn("Invalid codeword size\n");
}
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 47d8cda547cf..e287e71347c5 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -62,7 +62,7 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
macronix_nand_fix_broken_get_timings(chip);
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index b85e1c13b79e..cbd4f09ac178 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -385,13 +385,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
if (!chip->parameters.onfi)
return MICRON_ON_DIE_UNSUPPORTED;
- if (chip->bits_per_cell != 1)
+ if (nanddev_bits_per_cell(&chip->base) != 1)
return MICRON_ON_DIE_UNSUPPORTED;
/*
* We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
+ if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
/* 0x2 means on-die ECC is available. */
@@ -424,7 +424,7 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
/*
* We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
+ if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
return MICRON_ON_DIE_SUPPORTED;
@@ -448,7 +448,7 @@ static int micron_nand_init(struct nand_chip *chip)
goto err_free_manuf_data;
if (mtd->writesize == 2048)
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
ondie = micron_supports_on_die_ecc(chip);
@@ -479,7 +479,7 @@ static int micron_nand_init(struct nand_chip *chip)
* That's not needed for 8-bit ECC, because the status expose
* a better approximation of the number of bitflips in a page.
*/
- if (chip->ecc_strength_ds == 4) {
+ if (chip->base.eccreq.strength == 4) {
micron->ecc.rawbuf = kmalloc(mtd->writesize +
mtd->oobsize,
GFP_KERNEL);
@@ -489,16 +489,16 @@ static int micron_nand_init(struct nand_chip *chip)
}
}
- if (chip->ecc_strength_ds == 4)
+ if (chip->base.eccreq.strength == 4)
mtd_set_ooblayout(mtd,
&micron_nand_on_die_4_ooblayout_ops);
else
mtd_set_ooblayout(mtd,
&micron_nand_on_die_8_ooblayout_ops);
- chip->ecc.bytes = chip->ecc_strength_ds * 2;
+ chip->ecc.bytes = chip->base.eccreq.strength * 2;
chip->ecc.size = 512;
- chip->ecc.strength = chip->ecc_strength_ds;
+ chip->ecc.strength = chip->base.eccreq.strength;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
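
The on-die ECC setup above now reads the requirement from chip->base.eccreq and keeps sizing the layout as 512-byte steps with strength * 2 ECC bytes per step. A trivial standalone check of that sizing, with example page geometry rather than values probed from a device:

#include <stdio.h>

int main(void)
{
    unsigned int writesize = 4096, strength = 8;    /* example values */
    unsigned int ecc_size = 512;
    unsigned int ecc_steps = writesize / ecc_size;
    unsigned int ecc_bytes = strength * 2;          /* per 512-byte step */

    printf("%u steps/page, %u ECC bytes/step, %u ECC bytes/page\n",
           ecc_steps, ecc_bytes, ecc_steps * ecc_bytes);
    return 0;
}
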
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index d8184cf591ad..0b879bd0a68c 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -94,8 +94,8 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
goto ext_out;
}
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
+ chip->base.eccreq.strength = ecc->ecc_bits;
+ chip->base.eccreq.step_size = 1 << ecc->codeword_size;
ret = 0;
ext_out:
@@ -140,12 +140,15 @@ static void nand_bit_wise_majority(const void **srcbufs,
int nand_onfi_detect(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
struct nand_onfi_params *p;
struct onfi_params *onfi;
int onfi_version = 0;
char id[4];
int i, ret, val;
+ memorg = nanddev_get_memorg(&chip->base);
+
/* Try ONFI for unknown chip or LP */
ret = nand_readid_op(chip, 0x20, id, sizeof(id));
if (ret || strncmp(id, "ONFI", 4))
@@ -221,32 +224,36 @@ int nand_onfi_detect(struct nand_chip *chip)
goto free_onfi_param_page;
}
- mtd->writesize = le32_to_cpu(p->byte_per_page);
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
/*
* pages_per_block and blocks_per_lun may not be a power-of-2 size
* (don't ask me who thought of this...). MTD assumes that these
* dimensions will be power-of-2, so just truncate the remaining area.
*/
- mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
- mtd->erasesize *= mtd->writesize;
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
- /* See erasesize comment */
- chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->interleaved_bits;
- chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
- chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
+ /* See erasesize comment */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->max_bad_eraseblocks_per_lun = le32_to_cpu(p->blocks_per_lun);
+ memorg->bits_per_cell = p->bits_per_cell;
if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
chip->options |= NAND_BUSWIDTH_16;
if (p->ecc_bits != 0xff) {
- chip->ecc_strength_ds = p->ecc_bits;
- chip->ecc_step_ds = 512;
+ chip->base.eccreq.strength = p->ecc_bits;
+ chip->base.eccreq.step_size = 512;
} else if (onfi_version >= 21 &&
(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
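
Both the JEDEC and ONFI paths above keep the long-standing power-of-two truncation: pages_per_block and blocks_per_lun from the parameter page need not be powers of two, so only the highest set bit is kept via fls(). A userspace illustration with made-up parameter-page values (fls32() merely stands in for the kernel's fls()):

#include <stdio.h>

static int fls32(unsigned int x)        /* minimal stand-in for fls() */
{
    int r = 0;

    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    unsigned int pages_per_block = 96;  /* not a power of two */
    unsigned int blocks_per_lun = 1000; /* not a power of two */
    unsigned int pagesize = 4096;

    unsigned int pages_per_eraseblock = 1u << (fls32(pages_per_block) - 1);
    unsigned int eraseblocks_per_lun = 1u << (fls32(blocks_per_lun) - 1);

    printf("erasesize=%u eraseblocks/lun=%u\n",
           pages_per_eraseblock * pagesize, eraseblocks_per_lun);
    return 0;
}
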
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
index e46d4c492ad8..5552ce20ede0 100644
--- a/drivers/mtd/nand/raw/nand_samsung.c
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -20,6 +20,9 @@
static void samsung_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
/* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
if (chip->id.len == 6 && !nand_is_slc(chip) &&
@@ -27,29 +30,30 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
u8 extid = chip->id.data[3];
/* Get pagesize */
- mtd->writesize = 2048 << (extid & 0x03);
+ memorg->pagesize = 2048 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
extid >>= 2;
/* Get oobsize */
switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
case 1:
- mtd->oobsize = 128;
+ memorg->oobsize = 128;
break;
case 2:
- mtd->oobsize = 218;
+ memorg->oobsize = 218;
break;
case 3:
- mtd->oobsize = 400;
+ memorg->oobsize = 400;
break;
case 4:
- mtd->oobsize = 436;
+ memorg->oobsize = 436;
break;
case 5:
- mtd->oobsize = 512;
+ memorg->oobsize = 512;
break;
case 6:
- mtd->oobsize = 640;
+ memorg->oobsize = 640;
break;
default:
/*
@@ -62,31 +66,37 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
break;
}
+ mtd->oobsize = memorg->oobsize;
+
/* Get blocksize */
extid >>= 2;
+ memorg->pages_per_eraseblock = ((128 * 1024) <<
+ (((extid >> 1) & 0x04) |
+ (extid & 0x03))) /
+ memorg->pagesize;
mtd->erasesize = (128 * 1024) <<
(((extid >> 1) & 0x04) | (extid & 0x03));
/* Extract ECC requirements from 5th id byte*/
extid = (chip->id.data[4] >> 4) & 0x07;
if (extid < 5) {
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1 << extid;
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << extid;
} else {
- chip->ecc_step_ds = 1024;
+ chip->base.eccreq.step_size = 1024;
switch (extid) {
case 5:
- chip->ecc_strength_ds = 24;
+ chip->base.eccreq.strength = 24;
break;
case 6:
- chip->ecc_strength_ds = 40;
+ chip->base.eccreq.strength = 40;
break;
case 7:
- chip->ecc_strength_ds = 60;
+ chip->base.eccreq.strength = 60;
break;
default:
WARN(1, "Could not decode ECC info");
- chip->ecc_step_ds = 0;
+ chip->base.eccreq.step_size = 0;
}
}
} else {
@@ -96,8 +106,8 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
switch (chip->id.data[1]) {
/* K9F4G08U0D-S[I|C]B0(T00) */
case 0xDC:
- chip->ecc_step_ds = 512;
- chip->ecc_strength_ds = 1;
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1;
break;
/* K9F1G08U0E 21nm chips do not support subpage write */
@@ -121,9 +131,9 @@ static int samsung_nand_init(struct nand_chip *chip)
chip->options |= NAND_SAMSUNG_LP_OPTIONS;
if (!nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ chip->options |= NAND_BBM_LASTPAGE;
else
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index d068163b64b3..74ffcae48726 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -101,6 +101,9 @@ static void toshiba_nand_benand_init(struct nand_chip *chip)
static void toshiba_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
nand_decode_ext_id(chip);
@@ -114,8 +117,10 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.len >= 6 && nand_is_slc(chip) &&
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
- !(chip->id.data[4] & 0x80) /* !BENAND */)
- mtd->oobsize = 32 * mtd->writesize >> 9;
+ !(chip->id.data[4] & 0x80) /* !BENAND */) {
+ memorg->oobsize = 32 * memorg->pagesize >> 9;
+ mtd->oobsize = memorg->oobsize;
+ }
/*
* Extract ECC requirements from 6th id byte.
@@ -125,20 +130,20 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
* - 24nm: 8 bit ECC for each 512Byte is required.
*/
if (chip->id.len >= 6 && nand_is_slc(chip)) {
- chip->ecc_step_ds = 512;
+ chip->base.eccreq.step_size = 512;
switch (chip->id.data[5] & 0x7) {
case 0x4:
- chip->ecc_strength_ds = 1;
+ chip->base.eccreq.strength = 1;
break;
case 0x5:
- chip->ecc_strength_ds = 4;
+ chip->base.eccreq.strength = 4;
break;
case 0x6:
- chip->ecc_strength_ds = 8;
+ chip->base.eccreq.strength = 8;
break;
default:
WARN(1, "Could not get ECC info");
- chip->ecc_step_ds = 0;
+ chip->base.eccreq.step_size = 0;
break;
}
}
@@ -147,7 +152,7 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
static int toshiba_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
/* Check that chip is BENAND and ECC mode is on-die */
if (nand_is_slc(chip) && chip->ecc.mode == NAND_ECC_ON_DIE &&
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 933d1a629c51..df63fa564082 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -298,6 +298,8 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
+ struct nand_chip chip;
+ struct nand_controller base;
struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
@@ -644,9 +646,6 @@ static int __init init_nandsim(struct mtd_info *mtd)
return -EIO;
}
- /* Force mtd to not do delays */
- chip->legacy.chip_delay = 0;
-
/* Initialize the NAND flash parameters */
ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
ns->geom.totsz = mtd->size;
@@ -2076,24 +2075,6 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
return;
}
-static void ns_hwcontrol(struct nand_chip *chip, int cmd, unsigned int bitmask)
-{
- struct nandsim *ns = nand_get_controller_data(chip);
-
- ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
- ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
- ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
-
- if (cmd != NAND_CMD_NONE)
- ns_nand_write_byte(chip, cmd);
-}
-
-static int ns_device_ready(struct nand_chip *chip)
-{
- NS_DBG("device_ready\n");
- return 1;
-}
-
static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
int len)
{
@@ -2145,7 +2126,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
int i;
for (i = 0; i < len; i++)
- buf[i] = chip->legacy.read_byte(chip);
+ buf[i] = ns_nand_read_byte(chip);
return;
}
@@ -2168,6 +2149,46 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
return;
}
+static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ int i;
+ unsigned int op_id;
+ const struct nand_op_instr *instr = NULL;
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ ns->lines.ce = 1;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ ns->lines.cle = 0;
+ ns->lines.ale = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ns->lines.cle = 1;
+ ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ ns->lines.ale = 1;
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ /* we are always ready */
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
@@ -2208,6 +2229,7 @@ static int ns_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops ns_controller_ops = {
.attach_chip = ns_attach_chip,
+ .exec_op = ns_exec_op,
};
/*
@@ -2216,7 +2238,7 @@ static const struct nand_controller_ops ns_controller_ops = {
static int __init ns_init_module(void)
{
struct nand_chip *chip;
- struct nandsim *nand;
+ struct nandsim *ns;
int retval = -ENOMEM, i;
if (bus_width != 8 && bus_width != 16) {
@@ -2224,25 +2246,15 @@ static int __init ns_init_module(void)
return -EINVAL;
}
- /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
- chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
- GFP_KERNEL);
- if (!chip) {
+ ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
+ if (!ns) {
NS_ERR("unable to allocate core structures.\n");
return -ENOMEM;
}
+ chip = &ns->chip;
nsmtd = nand_to_mtd(chip);
- nand = (struct nandsim *)(chip + 1);
- nand_set_controller_data(chip, (void *)nand);
+ nand_set_controller_data(chip, (void *)ns);
- /*
- * Register simulator's callbacks.
- */
- chip->legacy.cmd_ctrl = ns_hwcontrol;
- chip->legacy.read_byte = ns_nand_read_byte;
- chip->legacy.dev_ready = ns_device_ready;
- chip->legacy.write_buf = ns_nand_write_buf;
- chip->legacy.read_buf = ns_nand_read_buf;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2251,9 +2263,11 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
- chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ /* fall through */
case 1:
- chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ /* fall through */
case 0:
break;
default:
@@ -2266,19 +2280,19 @@ static int __init ns_init_module(void)
* the initial ID read command correctly
*/
if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
- nand->geom.idbytes = 8;
+ ns->geom.idbytes = 8;
else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
- nand->geom.idbytes = 6;
+ ns->geom.idbytes = 6;
else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
- nand->geom.idbytes = 4;
+ ns->geom.idbytes = 4;
else
- nand->geom.idbytes = 2;
- nand->regs.status = NS_STATUS_OK(nand);
- nand->nxstate = STATE_UNKNOWN;
- nand->options |= OPT_PAGE512; /* temporary value */
- memcpy(nand->ids, id_bytes, sizeof(nand->ids));
+ ns->geom.idbytes = 2;
+ ns->regs.status = NS_STATUS_OK(ns);
+ ns->nxstate = STATE_UNKNOWN;
+ ns->options |= OPT_PAGE512; /* temporary value */
+ memcpy(ns->ids, id_bytes, sizeof(ns->ids));
if (bus_width == 16) {
- nand->busw = 16;
+ ns->busw = 16;
chip->options |= NAND_BUSWIDTH_16;
}
@@ -2293,7 +2307,10 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- chip->legacy.dummy_controller.ops = &ns_controller_ops;
+ nand_controller_init(&ns->base);
+ ns->base.ops = &ns_controller_ops;
+ chip->controller = &ns->base;
+
retval = nand_scan(chip, 1);
if (retval) {
NS_ERR("Could not scan NAND Simulator device\n");
@@ -2302,16 +2319,23 @@ static int __init ns_init_module(void)
if (overridesize) {
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ struct nand_memory_organization *memorg;
+ u64 targetsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
retval = -EINVAL;
goto err_exit;
}
+
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
nsmtd->size = new_size;
- chip->chipsize = new_size;
+ memorg->eraseblocks_per_lun = 1 << overridesize;
+ targetsize = nanddev_target_size(&chip->base);
chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
- chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
}
if ((retval = setup_wear_reporting(nsmtd)) != 0)
@@ -2323,27 +2347,27 @@ static int __init ns_init_module(void)
if ((retval = nand_create_bbt(chip)) != 0)
goto err_exit;
- if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+ if ((retval = parse_badblocks(ns, nsmtd)) != 0)
goto err_exit;
/* Register NAND partitions */
- retval = mtd_device_register(nsmtd, &nand->partitions[0],
- nand->nbparts);
+ retval = mtd_device_register(nsmtd, &ns->partitions[0],
+ ns->nbparts);
if (retval != 0)
goto err_exit;
- if ((retval = nandsim_debugfs_create(nand)) != 0)
+ if ((retval = nandsim_debugfs_create(ns)) != 0)
goto err_exit;
return 0;
err_exit:
- free_nandsim(nand);
+ free_nandsim(ns);
nand_release(chip);
- for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
- kfree(nand->partitions[i].name);
+ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
error:
- kfree(chip);
+ kfree(ns);
free_lists();
return retval;
@@ -2364,7 +2388,7 @@ static void __exit ns_cleanup_module(void)
nand_release(chip); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
- kfree(mtd_to_nand(nsmtd)); /* Free other structures */
+ kfree(ns); /* Free other structures */
free_lists();
}
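
The overridesize handling above no longer patches chip->chipsize; it overrides eraseblocks_per_lun in the memory organization and rederives the page mask from the recomputed target size. A standalone model of that recomputation, assuming a single-LUN target and example geometry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t pagesize = 2048, pages_per_eraseblock = 64;
    uint64_t luns_per_target = 1;
    unsigned int overridesize = 10;     /* 2^10 blocks per LUN */
    unsigned int page_shift = 11;       /* log2(pagesize) */

    uint64_t eraseblocks_per_lun = 1ULL << overridesize;
    uint64_t targetsize = pagesize * pages_per_eraseblock *
                          eraseblocks_per_lun * luns_per_target;
    uint64_t pagemask = (targetsize >> page_shift) - 1;

    printf("targetsize=%llu bytes, pagemask=0x%llx\n",
           (unsigned long long)targetsize, (unsigned long long)pagemask);
    return 0;
}
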
diff --git a/drivers/mtd/nand/raw/nuc900_nand.c b/drivers/mtd/nand/raw/nuc900_nand.c
index 38b1994e7ed3..56fa84029482 100644
--- a/drivers/mtd/nand/raw/nuc900_nand.c
+++ b/drivers/mtd/nand/raw/nuc900_nand.c
@@ -192,8 +192,9 @@ static void nuc900_nand_command_lp(struct nand_chip *chip,
return;
case NAND_CMD_READ0:
-
write_cmd_reg(nand, NAND_CMD_READSTART);
+ /* fall through */
+
default:
if (!chip->legacy.dev_ready) {
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 8f280a2962c8..a9a275342a41 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1725,9 +1725,9 @@ static bool omap2_nand_ecc_check(struct omap_nand_info *info)
break;
}
- if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_BCH)) {
+ if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
dev_err(&info->pdev->dev,
- "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return false;
}
if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index a3f32f939cc1..94c6401ef32f 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -465,11 +465,13 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_5 + offset);
regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_4 + offset);
+ /* fall through */
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_2 + offset);
+ /* fall through */
case BCH4_ECC:
regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_1 + offset);
@@ -511,11 +513,13 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_5[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
regs->elm_syndrome_fragment_4[i]);
+ /* fall through */
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
regs->elm_syndrome_fragment_2[i]);
+ /* fall through */
case BCH4_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
regs->elm_syndrome_fragment_1[i]);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 920e7375084f..6ead55e05b80 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1680,14 +1680,12 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
u8 *cw_data_buf, *cw_oob_buf;
int cw, data_size, oob_size, ret = 0;
- if (!data_buf) {
- data_buf = chip->data_buf;
- chip->pagebuf = -1;
- }
+ if (!data_buf)
+ data_buf = nand_get_data_buf(chip);
if (!oob_buf) {
+ nand_get_data_buf(chip);
oob_buf = chip->oob_poi;
- chip->pagebuf = -1;
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index cf6b1be1cf9c..e509c93737c4 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -101,14 +101,12 @@ static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static struct nand_bbt_descr flctl_4secc_smallpage = {
- .options = NAND_BBT_SCAN2NDPAGE,
.offs = 11,
.len = 1,
.pattern = scan_ff_pattern,
};
static struct nand_bbt_descr flctl_4secc_largepage = {
- .options = NAND_BBT_SCAN2NDPAGE,
.offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
@@ -986,6 +984,7 @@ static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
+ u64 targetsize = nanddev_target_size(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -998,11 +997,11 @@ static int flctl_chip_attach_chip(struct nand_chip *chip)
if (mtd->writesize == 512) {
flctl->page_size = 0;
- if (chip->chipsize > (32 << 20)) {
+ if (targetsize > (32 << 20)) {
/* big than 32MB */
flctl->rw_ADRCNT = ADRCNT_4;
flctl->erase_ADRCNT = ADRCNT_3;
- } else if (chip->chipsize > (2 << 16)) {
+ } else if (targetsize > (2 << 16)) {
/* big than 128KB */
flctl->rw_ADRCNT = ADRCNT_3;
flctl->erase_ADRCNT = ADRCNT_2;
@@ -1012,11 +1011,11 @@ static int flctl_chip_attach_chip(struct nand_chip *chip)
}
} else {
flctl->page_size = 1;
- if (chip->chipsize > (128 << 20)) {
+ if (targetsize > (128 << 20)) {
/* big than 128MB */
flctl->rw_ADRCNT = ADRCNT2_E;
flctl->erase_ADRCNT = ADRCNT_3;
- } else if (chip->chipsize > (8 << 16)) {
+ } else if (targetsize > (8 << 16)) {
/* big than 512KB */
flctl->rw_ADRCNT = ADRCNT_4;
flctl->erase_ADRCNT = ADRCNT_2;
@@ -1178,6 +1177,8 @@ static int flctl_probe(struct platform_device *pdev)
if (pdata->flcmncr_val & SEL_16BIT)
nand->options |= NAND_BUSWIDTH_16;
+ nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 4282bc477761..b021a5720b42 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -42,7 +42,8 @@
#define NFC_REG_CMD 0x0024
#define NFC_REG_RCMD_SET 0x0028
#define NFC_REG_WCMD_SET 0x002C
-#define NFC_REG_IO_DATA 0x0030
+#define NFC_REG_A10_IO_DATA 0x0030
+#define NFC_REG_A23_IO_DATA 0x0300
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
#define NFC_REG_DEBUG 0x003C
@@ -200,6 +201,22 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
return container_of(nand, struct sunxi_nand_chip, nand);
}
+/*
+ * NAND Controller capabilities structure: stores NAND controller capabilities
+ * for distinction between compatible strings.
+ *
+ * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB
+ * instead of MBUS (less configuration). A10, A10s, A13 and
+ * A20 use the MBUS but no extra configuration is needed.
+ * @reg_io_data: I/O data register
+ * @dma_maxburst: DMA maxburst
+ */
+struct sunxi_nfc_caps {
+ bool sram_through_ahb;
+ unsigned int reg_io_data;
+ unsigned int dma_maxburst;
+};
+
/**
* struct sunxi_nfc - stores sunxi NAND controller information
*
@@ -228,6 +245,7 @@ struct sunxi_nfc {
struct list_head chips;
struct completion complete;
struct dma_chan *dmac;
+ const struct sunxi_nfc_caps *caps;
};
static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
@@ -350,10 +368,29 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
goto err_unmap_buf;
}
- writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
+ /*
+ * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual)
+ * refers to the NAND controller's internal SRAM. This memory is mapped
+ * and so is accessible from the AHB. It seems that it can also be
+ * accessed by the MBUS. MBUS accesses are mandatory when using the
+ * internal DMA instead of the external DMA engine.
+ *
+ * During DMA I/O operation, either we access this memory from the AHB
+ * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the
+ * MBUS. In this case, we should also configure the MBUS DMA length
+ * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O operations
+ * over MBUS are also limited to 32kiB pages.
+ */
+ if (nfc->caps->sram_through_ahb)
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+ else
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
+
dmat = dmaengine_submit(dmad);
ret = dma_submit_error(dmat);
@@ -1313,20 +1350,19 @@ pio_fallback:
static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
- nand->pagebuf = -1;
+ u8 *buf = nand_get_data_buf(nand);
- return nand->ecc.read_page(nand, nand->data_buf, 1, page);
+ return nand->ecc.read_page(nand, buf, 1, page);
}
static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
+ u8 *buf = nand_get_data_buf(nand);
int ret;
- nand->pagebuf = -1;
-
- memset(nand->data_buf, 0xff, mtd->writesize);
- ret = nand->ecc.write_page(nand, nand->data_buf, 1, page);
+ memset(buf, 0xff, mtd->writesize);
+ ret = nand->ecc.write_page(nand, buf, 1, page);
if (ret)
return ret;
@@ -1724,8 +1760,8 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
nand->options |= NAND_SUBPAGE_READ;
if (!ecc->size) {
- ecc->size = nand->ecc_step_ds;
- ecc->strength = nand->ecc_strength_ds;
+ ecc->size = nand->base.eccreq.step_size;
+ ecc->strength = nand->base.eccreq.strength;
}
if (!ecc->size || !ecc->strength)
@@ -2088,6 +2124,12 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
goto out_mod_clk_unprepare;
}
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+ if (!nfc->caps) {
+ ret = -EINVAL;
+ goto out_ahb_reset_reassert;
+ }
+
ret = sunxi_nfc_rst(nfc);
if (ret)
goto out_ahb_reset_reassert;
@@ -2102,12 +2144,12 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (nfc->dmac) {
struct dma_slave_config dmac_cfg = { };
- dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+ dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
dmac_cfg.dst_addr = dmac_cfg.src_addr;
dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
- dmac_cfg.src_maxburst = 4;
- dmac_cfg.dst_maxburst = 4;
+ dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
+ dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
dmaengine_slave_config(nfc->dmac, &dmac_cfg);
} else {
dev_warn(dev, "failed to request rxtx DMA channel\n");
@@ -2152,8 +2194,26 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
return 0;
}
+static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
+ .reg_io_data = NFC_REG_A10_IO_DATA,
+ .dma_maxburst = 4,
+};
+
+static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
+ .sram_through_ahb = true,
+ .reg_io_data = NFC_REG_A23_IO_DATA,
+ .dma_maxburst = 8,
+};
+
static const struct of_device_id sunxi_nfc_ids[] = {
- { .compatible = "allwinner,sun4i-a10-nand" },
+ {
+ .compatible = "allwinner,sun4i-a10-nand",
+ .data = &sunxi_nfc_a10_caps,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-nand-controller",
+ .data = &sunxi_nfc_a23_caps,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
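
The sunxi changes above hang per-SoC capabilities (I/O data register offset, DMA maxburst, AHB versus MBUS SRAM access) off the compatible string and bail out of probe() when no match data is found. The userspace sketch below mimics that lookup; match_caps() is only a stand-in for of_device_get_match_data().

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct nfc_caps {
    bool sram_through_ahb;
    unsigned int reg_io_data;
    unsigned int dma_maxburst;
};

static const struct nfc_caps a10_caps = { false, 0x0030, 4 };
static const struct nfc_caps a23_caps = { true,  0x0300, 8 };

static const struct { const char *compatible; const struct nfc_caps *caps; }
nfc_ids[] = {
    { "allwinner,sun4i-a10-nand",            &a10_caps },
    { "allwinner,sun8i-a23-nand-controller", &a23_caps },
};

static const struct nfc_caps *match_caps(const char *compatible)
{
    for (size_t i = 0; i < sizeof(nfc_ids) / sizeof(nfc_ids[0]); i++)
        if (!strcmp(nfc_ids[i].compatible, compatible))
            return nfc_ids[i].caps;
    return NULL;
}

int main(void)
{
    const struct nfc_caps *caps =
        match_caps("allwinner,sun8i-a23-nand-controller");

    if (!caps)
        return 1;       /* mirrors the -EINVAL bail-out in probe() */
    printf("io_data reg 0x%04x, maxburst %u, AHB SRAM access: %s\n",
           caps->reg_io_data, caps->dma_maxburst,
           caps->sram_through_ahb ? "yes" : "no");
    return 0;
}
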
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 13be32c38194..3cc9a4c41443 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -853,7 +853,7 @@ static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
} else {
strength_sel = strength[i];
- if (strength_sel < chip->ecc_strength_ds)
+ if (strength_sel < chip->base.eccreq.strength)
continue;
}
@@ -917,9 +917,9 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 512;
chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if (chip->ecc_step_ds != 512) {
+ if (chip->base.eccreq.step_size != 512) {
dev_err(ctrl->dev, "Unsupported step size %d\n",
- chip->ecc_step_ds);
+ chip->base.eccreq.step_size);
return -EINVAL;
}
@@ -950,7 +950,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
if (ret < 0) {
dev_err(ctrl->dev,
"No valid strength found, minimum %d\n",
- chip->ecc_strength_ds);
+ chip->base.eccreq.strength);
return ret;
}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index a662ca1970e5..e4fe8c4bc711 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -364,7 +364,7 @@ static int vf610_nfc_cmd(struct nand_chip *chip,
{
const struct nand_op_instr *instr;
struct vf610_nfc *nfc = chip_to_nfc(chip);
- int op_id = -1, trfr_sz = 0, offset;
+ int op_id = -1, trfr_sz = 0, offset = 0;
u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
bool force8bit = false;
@@ -850,6 +850,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
}
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+ if (!of_id)
+ return -ENODEV;
+
nfc->variant = (enum vf610_nfc_variant)of_id->data;
for_each_available_child_of_node(nfc->dev->of_node, child) {
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index fa87ae28cdfe..4c15bb58c623 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -19,21 +19,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- u16 *column)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
- unsigned int shift;
-
- if (nand->memorg.planes_per_lun < 2)
- return;
-
- /* The plane number is passed in MSB just above the column address */
- shift = fls(nand->memorg.pagesize);
- *column |= req->pos.plane << shift;
-}
-
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
@@ -227,27 +212,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.read_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
- int ret;
+ ssize_t ret;
if (req->datalen) {
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.dataoffs = 0;
- adjreq.databuf.in = spinand->databuf;
buf = spinand->databuf;
- nbytes = adjreq.datalen;
+ nbytes = nanddev_page_size(nand);
+ column = 0;
}
if (req->ooblen) {
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.in = spinand->oobbuf;
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
@@ -255,28 +234,19 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
- op.addr.val = column;
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
- /*
- * Some controllers are limited in term of max RX data size. In this
- * case, just repeat the READ_CACHE operation after updating the
- * column.
- */
while (nbytes) {
- op.data.buf.in = buf;
- op.data.nbytes = nbytes;
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
- return ret;
+ if (!ret || ret > nbytes)
+ return -EIO;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
if (req->datalen)
@@ -300,14 +270,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.write_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
- unsigned int nbytes;
- u16 column = 0;
- int ret;
+ ssize_t ret;
/*
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -318,12 +286,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
memset(spinand->databuf, 0xff, nbytes);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.out = spinand->oobbuf;
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -340,42 +302,19 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
req->ooblen);
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
- op = *spinand->op_templates.write_cache;
- op.addr.val = column;
-
- /*
- * Some controllers are limited in term of max TX data size. In this
- * case, split the operation into one LOAD CACHE and one or more
- * LOAD RANDOM CACHE.
- */
while (nbytes) {
- op.data.buf.out = buf;
- op.data.nbytes = nbytes;
-
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+ if (ret < 0)
return ret;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ if (!ret || ret > nbytes)
+ return -EIO;
- /*
- * We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation might
- * reset the cache to 0xff.
- */
- if (nbytes) {
- column = op.addr.val;
- op = *spinand->op_templates.update_cache;
- op.addr.val = column;
- }
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
}
return 0;
@@ -755,6 +694,59 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_info info = {
+ .length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+ sizeof(*spinand->dirmaps) *
+ nand->memorg.planes_per_lun,
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
@@ -1012,6 +1004,14 @@ static int spinand_init(struct spinand_device *spinand)
goto err_free_bufs;
}
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_manuf_cleanup;
+ }
+
/* After power up, all blocks are locked, so unlock them here. */
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
@@ -1037,6 +1037,7 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_block_markbad = spinand_mtd_block_markbad;
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
if (spinand->eccinfo.ooblayout)
mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
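
spinand_read_from_cache_op()/spinand_write_to_cache_op() above switch from hand-rolled READ/PROGRAM CACHE splitting to per-plane direct mappings: the plane index is encoded just above the column address, and the loop simply resubmits until the whole page plus OOB has been transferred, tolerating short transfers. A userspace model of that loop (fake_dirmap_read() is only a stand-in for spi_mem_dirmap_read()):

#include <stdio.h>
#include <string.h>

static int fls32(unsigned int x)        /* minimal stand-in for fls() */
{
    int r = 0;

    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

/* Stand-in for spi_mem_dirmap_read(): returns at most 1 KiB per call. */
static long fake_dirmap_read(unsigned int offs, size_t len, void *buf)
{
    size_t chunk = len < 1024 ? len : 1024;

    (void)offs;                         /* a real read would use the offset */
    memset(buf, 0xff, chunk);           /* pretend we read erased data */
    return (long)chunk;
}

int main(void)
{
    unsigned int pagesize = 2048, plane = 1;
    /* The plane number sits just above the column address bits. */
    unsigned int column = plane << fls32(pagesize);
    unsigned char buf[2048 + 64];       /* page + OOB for the geometry above */
    size_t nbytes = sizeof(buf);
    unsigned char *p = buf;

    while (nbytes) {
        long ret = fake_dirmap_read(column, nbytes, p);

        if (ret <= 0 || (size_t)ret > nbytes)
            return 1;                   /* mirrors the -EIO check above */
        nbytes -= ret;
        column += ret;
        p += ret;
    }
    printf("transferred %zu bytes starting at offset 0x%x\n",
           sizeof(buf), plane << fls32(pagesize));
    return 0;
}
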
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 0b49d8264bef..e5586390026a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -162,7 +162,7 @@ static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4xA", 0xF1,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -171,7 +171,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F2GQ4xA", 0xF2,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA", 0xF4,
- NAND_MEMORG(1, 2048, 64, 64, 4096, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -189,7 +189,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
- NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
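
The SPINAND_INFO entries above gain a new NAND_MEMORG argument giving the maximum number of bad eraseblocks per LUN (20 or 40 here), which is what lets mtd->_max_bad_blocks be wired to nanddev_mtd_max_bad_blocks() in core.c. A simplified model of how such a per-LUN allowance can be turned into a worst-case budget for a byte range, with example geometry rather than datasheet values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t pagesize = 2048, pages_per_block = 64;
    uint64_t blocks_per_lun = 1024, max_bad_per_lun = 20;
    uint64_t lunsize = pagesize * pages_per_block * blocks_per_lun;

    uint64_t offs = 0, len = 3 * lunsize / 2;   /* range spans two LUNs */
    uint64_t first_lun = offs / lunsize;
    uint64_t last_lun = (offs + len - 1) / lunsize;
    uint64_t budget = (last_lun - first_lun + 1) * max_bad_per_lun;

    printf("worst-case bad blocks for the range: %llu\n",
           (unsigned long long)budget);
    return 0;
}
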
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index d16b57081c95..6502727049a8 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB", 0x12,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB", 0x22,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 9c4381d6847b..7d7b1f7fcf71 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -92,7 +92,7 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info micron_spinand_table[] = {
SPINAND_INFO("MT29F2G01ABAGD", 0x24,
- NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index db8021da45b5..1cb3760ff779 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -96,7 +96,7 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info toshiba_spinand_table[] = {
/* 3.3V 1Gb */
SPINAND_INFO("TC58CVG0S3", 0xC2,
- NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -106,7 +106,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 3.3V 2Gb */
SPINAND_INFO("TC58CVG1S3", 0xCB,
- NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -116,7 +116,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 3.3V 4Gb */
SPINAND_INFO("TC58CVG2S0", 0xCD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -126,7 +126,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 1.8V 1Gb */
SPINAND_INFO("TC58CYG0S3", 0xB2,
- NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -136,7 +136,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 1.8V 2Gb */
SPINAND_INFO("TC58CYG1S3", 0xBB,
- NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -146,7 +146,7 @@ static const struct spinand_info toshiba_spinand_table[] = {
tc58cxgxsx_ecc_get_status)),
/* 1.8V 4Gb */
SPINAND_INFO("TC58CYG2S0", 0xBD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 5d944580b898..a6c17e0cace8 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -76,7 +76,7 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
static const struct spinand_info winbond_spinand_table[] = {
SPINAND_INFO("W25M02GV", 0xAB,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -85,7 +85,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
SPINAND_INFO("W25N01GV", 0xAA,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index fccf1950e92d..bc201327dda0 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -1,3 +1,30 @@
+config MTD_PARSER_IMAGETAG
+ tristate "Parser for BCM963XX Image Tag format partitions"
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ select CRC32
+ help
+ Image Tag is the firmware header used by Broadcom on its xDSL line
+ of devices. It describes the offsets and lengths of the kernel and
+ rootfs partitions.
+ This driver adds support for parsing a partition with an Image Tag
+ header and creates up to two partitions, kernel and rootfs.
+
+config MTD_AFS_PARTS
+ tristate "ARM Firmware Suite partition parsing"
+ depends on (ARM || ARM64)
+ help
+ The ARM Firmware Suite allows the user to divide flash devices into
+ multiple 'images'. Each such image has a header containing its name
+ and offset/size etc.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
+
config MTD_PARSER_TRX
tristate "Parser for TRX format partitions"
depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST)
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index d8418bf6804a..cddc8f35a856 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1,3 +1,5 @@
+obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
new file mode 100644
index 000000000000..0c730024f806
--- /dev/null
+++ b/drivers/mtd/parsers/afs.c
@@ -0,0 +1,410 @@
+/*======================================================================
+
+ drivers/mtd/afs.c: ARM Flash Layout/Partitioning
+
+ Copyright © 2000 ARM Limited
+ Copyright (C) 2019 Linus Walleij
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define AFSV1_FOOTER_MAGIC 0xA0FFFF9F
+#define AFSV2_FOOTER_MAGIC1 0x464C5348 /* "FLSH" */
+#define AFSV2_FOOTER_MAGIC2 0x464F4F54 /* "FOOT" */
+
+struct footer_v1 {
+ u32 image_info_base; /* Address of first word of ImageFooter */
+ u32 image_start; /* Start of area reserved by this footer */
+ u32 signature; /* 'Magic' number proves it's a footer */
+ u32 type; /* Area type: ARM Image, SIB, customer */
+ u32 checksum; /* Just this structure */
+};
+
+struct image_info_v1 {
+ u32 bootFlags; /* Boot flags, compression etc. */
+ u32 imageNumber; /* Unique number, selects for boot etc. */
+ u32 loadAddress; /* Address program should be loaded to */
+ u32 length; /* Actual size of image */
+ u32 address; /* Image is executed from here */
+ char name[16]; /* Null terminated */
+ u32 headerBase; /* Flash Address of any stripped header */
+ u32 header_length; /* Length of header in memory */
+ u32 headerType; /* AIF, RLF, s-record etc. */
+ u32 checksum; /* Image checksum (inc. this struct) */
+};
+
+static u32 word_sum(void *words, int num)
+{
+ u32 *p = words;
+ u32 sum = 0;
+
+ while (num--)
+ sum += *p++;
+
+ return sum;
+}
+
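+/*
+ * One's-complement sum with end-around carry: whenever an addition
+ * would overflow, the carry is folded back in, and the final sum is
+ * inverted. A correctly checksummed block therefore yields 0 here
+ * (its words sum to 0xffffffff in one's complement).
+ */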
+static u32 word_sum_v2(u32 *p, u32 num)
+{
+ u32 sum = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ u32 val;
+
+ val = p[i];
+ if (val > ~sum)
+ sum++;
+ sum += val;
+ }
+ return ~sum;
+}
+
+static bool afs_is_v1(struct mtd_info *mtd, u_int off)
+{
+ /* The magic is 12 bytes from the end of the erase block */
+ u_int ptr = off + mtd->erasesize - 12;
+ u32 magic;
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, 4, &sz, (u_char *)&magic);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return false;
+ }
+ if (ret >= 0 && sz != 4)
+ return false;
+
+ return (magic == AFSV1_FOOTER_MAGIC);
+}
+
+static bool afs_is_v2(struct mtd_info *mtd, u_int off)
+{
+ /* The magic is in the last 8 bytes of the erase block */
+ u_int ptr = off + mtd->erasesize - 8;
+ u32 foot[2];
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, 8, &sz, (u_char *)foot);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return false;
+ }
+ if (ret >= 0 && sz != 8)
+ return false;
+
+ return (foot[0] == AFSV2_FOOTER_MAGIC1 &&
+ foot[1] == AFSV2_FOOTER_MAGIC2);
+}
+
+static int afs_parse_v1_partition(struct mtd_info *mtd,
+ u_int off, struct mtd_partition *part)
+{
+ struct footer_v1 fs;
+ struct image_info_v1 iis;
+ u_int mask;
+ /*
+ * Static checks cannot see that we bail out if we have an error
+ * reading the footer.
+ */
+ u_int uninitialized_var(iis_ptr);
+ u_int uninitialized_var(img_ptr);
+ u_int ptr;
+ size_t sz;
+ int ret;
+ int i;
+
+ /*
+ * This is the address mask; we use this to mask off out of
+ * range address bits.
+ */
+ mask = mtd->size - 1;
+
+ ptr = off + mtd->erasesize - sizeof(fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
+ if (ret >= 0 && sz != sizeof(fs))
+ ret = -EINVAL;
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return ret;
+ }
+ /*
+ * Check the checksum: all footer words, including the stored
+ * checksum, must sum to 0xffffffff.
+ */
+ if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
+ return -EINVAL;
+
+ /*
+ * Hide the SIB (System Information Block)
+ */
+ if (fs.type == 2)
+ return 0;
+
+ iis_ptr = fs.image_info_base & mask;
+ img_ptr = fs.image_start & mask;
+
+ /*
+ * Check the image info base. It cannot be
+ * located after the footer structure.
+ */
+ if (iis_ptr >= ptr)
+ return 0;
+
+ /*
+ * Check the start of this image. The image
+ * data cannot be located after this block.
+ */
+ if (img_ptr > off)
+ return 0;
+
+ /* Read the image info block */
+ memset(&iis, 0, sizeof(iis));
+ ret = mtd_read(mtd, iis_ptr, sizeof(iis), &sz, (u_char *)&iis);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ iis_ptr, ret);
+ return -EINVAL;
+ }
+
+ if (sz != sizeof(iis))
+ return -EINVAL;
+
+ /*
+ * Validate the name - it must be NUL terminated.
+ */
+ for (i = 0; i < sizeof(iis.name); i++)
+ if (iis.name[i] == '\0')
+ break;
+ if (i >= sizeof(iis.name))
+ return -EINVAL;
+
+ part->name = kstrdup(iis.name, GFP_KERNEL);
+ if (!part->name)
+ return -ENOMEM;
+
+ part->size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
+ part->offset = img_ptr;
+ part->mask_flags = 0;
+
+ printk(" mtd: at 0x%08x, %5lluKiB, %8u, %s\n",
+ img_ptr, part->size / 1024,
+ iis.imageNumber, part->name);
+
+ return 0;
+}
+
+static int afs_parse_v2_partition(struct mtd_info *mtd,
+ u_int off, struct mtd_partition *part)
+{
+ u_int ptr;
+ u32 footer[12];
+ u32 imginfo[36];
+ char *name;
+ u32 version;
+ u32 entrypoint;
+ u32 attributes;
+ u32 region_count;
+ u32 block_start;
+ u32 block_end;
+ u32 crc;
+ size_t sz;
+ int ret;
+ int i;
+ int pad = 0;
+
+ pr_debug("Parsing v2 partition @%08x-%08x\n",
+ off, off + mtd->erasesize);
+
+ /* First read the footer */
+ ptr = off + mtd->erasesize - sizeof(footer);
+ ret = mtd_read(mtd, ptr, sizeof(footer), &sz, (u_char *)footer);
+ if ((ret < 0) || (ret >= 0 && sz != sizeof(footer))) {
+ pr_err("AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return -EIO;
+ }
+ name = (char *) &footer[0];
+ version = footer[9];
+ ptr = off + mtd->erasesize - sizeof(footer) - footer[8];
+
+ pr_debug("found image \"%s\", version %08x, info @%08x\n",
+ name, version, ptr);
+
+ /* Then read the image information */
+ ret = mtd_read(mtd, ptr, sizeof(imginfo), &sz, (u_char *)imginfo);
+ if ((ret < 0) || (ret >= 0 && sz != sizeof(imginfo))) {
+ pr_err("AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return -EIO;
+ }
+
+ /* 32bit platforms have 4 bytes padding */
+ crc = word_sum_v2(&imginfo[1], 34);
+ if (!crc) {
+ pr_debug("Padding 1 word (4 bytes)\n");
+ pad = 1;
+ } else {
+ /* 64bit platforms have 8 bytes padding */
+ crc = word_sum_v2(&imginfo[2], 34);
+ if (!crc) {
+ pr_debug("Padding 2 words (8 bytes)\n");
+ pad = 2;
+ }
+ }
+ if (crc) {
+ pr_err("AFS: bad checksum on v2 image info: %08x\n", crc);
+ return -EINVAL;
+ }
+ entrypoint = imginfo[pad];
+ attributes = imginfo[pad+1];
+ region_count = imginfo[pad+2];
+ block_start = imginfo[20];
+ block_end = imginfo[21];
+
+ pr_debug("image entry=%08x, attr=%08x, regions=%08x, "
+ "bs=%08x, be=%08x\n",
+ entrypoint, attributes, region_count,
+ block_start, block_end);
+
+ for (i = 0; i < region_count; i++) {
+ u32 region_load_addr = imginfo[pad + 3 + i*4];
+ u32 region_size = imginfo[pad + 4 + i*4];
+ u32 region_offset = imginfo[pad + 5 + i*4];
+ u32 region_start;
+ u32 region_end;
+
+ pr_debug(" region %d: address: %08x, size: %08x, "
+ "offset: %08x\n",
+ i,
+ region_load_addr,
+ region_size,
+ region_offset);
+
+ region_start = off + region_offset;
+ region_end = region_start + region_size;
+ /* Align partition to end of erase block */
+ region_end += (mtd->erasesize - 1);
+ region_end &= ~(mtd->erasesize - 1);
+ pr_debug(" partition start = %08x, partition end = %08x\n",
+ region_start, region_end);
+
+ /* Create one partition per region */
+ part->name = kstrdup(name, GFP_KERNEL);
+ if (!part->name)
+ return -ENOMEM;
+ part->offset = region_start;
+ part->size = region_end - region_start;
+ part->mask_flags = 0;
+ }
+
+ return 0;
+}
+
+static int parse_afs_partitions(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ u_int off, sz;
+ int ret = 0;
+ int i;
+
+ /* Count the partitions by looping over all erase blocks */
+ for (i = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
+ if (afs_is_v1(mtd, off)) {
+ sz += sizeof(struct mtd_partition);
+ i += 1;
+ }
+ if (afs_is_v2(mtd, off)) {
+ sz += sizeof(struct mtd_partition);
+ i += 1;
+ }
+ }
+
+ if (!i)
+ return 0;
+
+ parts = kzalloc(sz, GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ /*
+ * Identify the partitions
+ */
+ for (i = off = 0; off < mtd->size; off += mtd->erasesize) {
+ if (afs_is_v1(mtd, off)) {
+ ret = afs_parse_v1_partition(mtd, off, &parts[i]);
+ if (ret)
+ goto out_free_parts;
+ i++;
+ }
+ if (afs_is_v2(mtd, off)) {
+ ret = afs_parse_v2_partition(mtd, off, &parts[i]);
+ if (ret)
+ goto out_free_parts;
+ i++;
+ }
+ }
+
+ *pparts = parts;
+ return i;
+
+out_free_parts:
+ while (i >= 0) {
+ if (parts[i].name)
+ kfree(parts[i].name);
+ i--;
+ }
+ kfree(parts);
+ *pparts = NULL;
+ return ret;
+}
+
+static const struct of_device_id mtd_parser_afs_of_match_table[] = {
+ { .compatible = "arm,arm-firmware-suite" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_afs_of_match_table);
+
+static struct mtd_part_parser afs_parser = {
+ .parse_fn = parse_afs_partitions,
+ .name = "afs",
+ .of_match_table = mtd_parser_afs_of_match_table,
+};
+module_mtd_part_parser(afs_parser);
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
new file mode 100644
index 000000000000..9537c183a3be
--- /dev/null
+++ b/drivers/mtd/parsers/parser_imagetag.c
@@ -0,0 +1,222 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcm963xx_tag.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Ensure strings read from flash structs are null terminated */
+#define STR_NULL_TERMINATE(x) \
+ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
+
+static int bcm963xx_read_imagetag(struct mtd_info *master, const char *name,
+ loff_t tag_offset, struct bcm_tag *buf)
+{
+ int ret;
+ size_t retlen;
+ u32 computed_crc;
+
+ ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
+ if (ret)
+ return ret;
+
+ if (retlen != sizeof(*buf))
+ return -EIO;
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ STR_NULL_TERMINATE(buf->board_id);
+ STR_NULL_TERMINATE(buf->tag_version);
+
+ pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
+ name, tag_offset, buf->tag_version, buf->board_id);
+
+ return 0;
+ }
+
+ pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
+ name, tag_offset, buf->header_crc, computed_crc);
+ return -EINVAL;
+}
+
+static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 0, curpart = 0;
+ struct bcm_tag *buf = NULL;
+ struct mtd_partition *parts;
+ int ret;
+ unsigned int rootfsaddr, kerneladdr, spareaddr, offset;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ int i;
+ bool rootfs_first = false;
+
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = bcm963xx_read_imagetag(master, "rootfs", 0, buf);
+ if (!ret) {
+ STR_NULL_TERMINATE(buf->flash_image_start);
+ if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
+ rootfsaddr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->kernel_address);
+ if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
+ kerneladdr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->kernel_length);
+ if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->total_length);
+ if (kstrtouint(buf->total_length, 10, &totallen)) {
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
+ goto out;
+ }
+
+ /*
+ * Addresses are flash absolute, so convert to partition
+ * relative addresses. Assume either kernel or rootfs will
+ * directly follow the image tag.
+ */
+ if (rootfsaddr < kerneladdr)
+ offset = rootfsaddr - sizeof(struct bcm_tag);
+ else
+ offset = kerneladdr - sizeof(struct bcm_tag);
+
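+ /*
+ * Worked example (illustrative): if the kernel directly follows
+ * the tag at absolute address A, then offset = A -
+ * sizeof(struct bcm_tag), so after rebasing below kerneladdr ==
+ * sizeof(struct bcm_tag), i.e. the kernel starts right after the
+ * tag within the master device.
+ */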
+ kerneladdr = kerneladdr - offset;
+ rootfsaddr = rootfsaddr - offset;
+ spareaddr = roundup(totallen, master->erasesize);
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
+ } else {
+ goto out;
+ }
+ sparelen = master->size - spareaddr;
+
+ /* Determine number of partitions */
+ if (rootfslen > 0)
+ nrparts++;
+
+ if (kernellen > 0)
+ nrparts++;
+
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Start building partition list */
+ if (kernellen > 0) {
+ int kernelpart = curpart;
+
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
+ curpart++;
+ }
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ ret = 0;
+
+out:
+ vfree(buf);
+
+ if (ret)
+ return ret;
+
+ return nrparts;
+}
+
+static const struct of_device_id parse_bcm963xx_imagetag_match_table[] = {
+ { .compatible = "brcm,bcm963xx-imagetag" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_bcm963xx_imagetag_match_table);
+
+static struct mtd_part_parser bcm963xx_imagetag_parser = {
+ .parse_fn = bcm963xx_parse_imagetag_partitions,
+ .name = "bcm963xx-imagetag",
+ .of_match_table = parse_bcm963xx_imagetag_match_table,
+};
+module_mtd_part_parser(bcm963xx_imagetag_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com>");
+MODULE_DESCRIPTION("MTD parser for BCM963XX CFE Image Tag partitions");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 89227b1d036a..e0955a98a0f4 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -222,17 +222,17 @@ static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
uint8_t ecc[3];
__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC)) < 0)
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
return -EIO;
buffer += SM_SMALL_PAGE;
__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC)) < 0)
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
return -EIO;
return 0;
}
@@ -399,11 +399,11 @@ restart:
if (ftl->smallpagenand) {
__nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE,
oob.ecc1,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
SM_SMALL_PAGE, oob.ecc2,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
}
if (!sm_write_sector(ftl, zone, block, boffset,
buf + boffset, &oob))
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 872b40922608..bfbfc17ed6aa 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -63,6 +63,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index af0a22019516..d60cbf23d9aa 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ /* Read cannot cross 4K boundary */
+ block_size = min_t(loff_t, from + block_size,
+ round_up(from + 1, SZ_4K)) - from;
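+ /*
+ * e.g. from = 0x0ff0 and block_size = 64: round_up(0x0ff1, SZ_4K)
+ * is 0x1000, so block_size is clamped to 0x1000 - 0x0ff0 = 0x10
+ * bytes and the transfer stops at the 4K boundary.
+ */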
+
writel(from, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
@@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ /* Write cannot cross 4K boundary */
+ block_size = min_t(loff_t, to + block_size,
+ round_up(to + 1, SZ_4K)) - to;
+
writel(to, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index fae147452aff..73172d7f512b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -744,7 +744,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/*
- * Erase types are ordered by size, with the biggest erase type at
+ * Erase types are ordered by size, with the smallest erase type at
* index 0.
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
@@ -1905,7 +1905,9 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
@@ -2071,8 +2073,8 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
return &spi_nor_ids[tmp];
}
}
- dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
- id[0], id[1], id[2]);
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+ SPI_NOR_MAX_ID_LEN, id);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index c71523e94580..73b06304c975 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -21,7 +21,7 @@
* or detected.
*/
-#if IS_ENABLED(CONFIG_MTD_NAND)
+#if IS_ENABLED(CONFIG_MTD_RAW_NAND)
struct nand_ecc_test {
const char *name;
@@ -122,9 +122,9 @@ static int no_bit_error_verify(void *error_data, void *error_ecc,
int ret;
__nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
@@ -152,9 +152,9 @@ static int single_bit_error_correct(void *error_data, void *error_ecc,
int ret;
__nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
if (ret == 1 && !memcmp(correct_data, error_data, size))
return 0;
@@ -189,9 +189,9 @@ static int double_bit_error_detect(void *error_data, void *error_ecc,
int ret;
__nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
return (ret == -EBADMSG) ? 0 : -EINVAL;
}
@@ -266,7 +266,7 @@ static int nand_ecc_test_run(const size_t size)
prandom_bytes(correct_data, size);
__nand_calculate_ecc(correct_data, size, correct_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2709dc02fc24..1f56c655832b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1475,7 +1475,7 @@ static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
*/
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
{
- int err;
+ int err = 0;
struct ubi_wl_entry *e;
if (pnum < 0 || pnum >= ubi->peb_count) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index da1fc17295d9..b996967af8d9 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1098,13 +1098,6 @@ static int bond_option_arp_validate_set(struct bonding *bond,
{
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!newval->value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
bond->params.arp_validate = newval->value;
return 0;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 37ebd890ef51..9e06dff619c3 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -871,7 +871,7 @@ static int emac_probe(struct platform_device *pdev)
/* Read MAC-address from DT */
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, mac_addr);
/* Check if the MAC address is valid, if not get a random one */
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 7f89ad5c336d..13a1d99b29c6 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -961,7 +961,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
mac_addr = of_get_mac_address(dev->of_node);
if (!IS_ERR(mac_addr))
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, mac_addr);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 15b1130aa4ae..0e5de88fd6e8 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1504,7 +1504,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
if (!IS_ERR(mac))
- memcpy(netdev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr, mac);
else
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index e9a0213b08c4..6238e6951336 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -41,7 +41,7 @@ config CS89x0_PLATFORM
config EP93XX_ETH
tristate "EP93xx Ethernet support"
- depends on ARM && ARCH_EP93XX
+ depends on (ARM && ARCH_EP93XX) || COMPILE_TEST
select MII
help
This is a driver for the ethernet hardware included in EP93xx CPUs.
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 13dfdfca49fc..a6da9873570b 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -25,7 +25,7 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 953ee5616801..5e1aff9a5fd6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1413,7 +1413,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
- memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
+ ether_addr_copy(pdata->dev_addr, mac_addr);
return pdata;
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7b7e526869a7..30cdb246d020 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -903,7 +903,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*/
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr)) {
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, mac_addr);
} else {
struct mpc52xx_fec __iomem *fec = priv->fec;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 9cd2c28d17df..7ab8095db192 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -729,7 +729,7 @@ static int mac_probe(struct platform_device *_of_dev)
err = -EINVAL;
goto _return_of_get_parent;
}
- memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+ ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 90ea7a115d0f..5fad73b2e123 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1015,7 +1015,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
mac_addr = of_get_mac_address(ofdev->dev.of_node);
if (!IS_ERR(mac_addr))
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, mac_addr);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index df13c693b038..e670cd293dba 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -873,7 +873,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, mac_addr);
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 216e99af2b5a..4d6892d2f0a4 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3911,7 +3911,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, mac_addr);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b398d6c94dbd..3dcd9c3d8781 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -118,7 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
@@ -849,11 +849,7 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (retry);
- /* handle pending MAC address changes after successful login */
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
+ __ibmvnic_set_mac(netdev, adapter->mac_addr);
return 0;
}
@@ -1115,7 +1111,6 @@ static int ibmvnic_open(struct net_device *netdev)
}
rc = __ibmvnic_open(netdev);
- netif_carrier_on(netdev);
return rc;
}
@@ -1686,28 +1681,40 @@ static void ibmvnic_set_multi(struct net_device *netdev)
}
}
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
+static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
union ibmvnic_crq crq;
int rc;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ if (!is_valid_ether_addr(dev_addr)) {
+ rc = -EADDRNOTAVAIL;
+ goto err;
+ }
memset(&crq, 0, sizeof(crq));
crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
- ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
init_completion(&adapter->fw_done);
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto err;
+ }
+
wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- return adapter->fw_done_rc ? -EIO : 0;
+ if (adapter->fw_done_rc) {
+ rc = -EIO;
+ goto err;
+ }
+
+ return 0;
+err:
+ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
+ return rc;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
@@ -1716,13 +1723,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int rc;
- if (adapter->state == VNIC_PROBED) {
- memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
- adapter->mac_change_pending = true;
- return 0;
- }
-
- rc = __ibmvnic_set_mac(netdev, addr);
+ rc = 0;
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
+ if (adapter->state != VNIC_PROBED)
+ rc = __ibmvnic_set_mac(netdev, addr->sa_data);
return rc;
}
@@ -1859,8 +1863,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- netif_carrier_on(netdev);
-
return 0;
}
@@ -1930,8 +1932,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
return 0;
}
- netif_carrier_on(netdev);
-
return 0;
}
@@ -3937,8 +3937,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
goto out;
}
- memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
- ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
return rc;
@@ -4475,6 +4475,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
crq->link_state_indication.phys_link_state;
adapter->logical_link_state =
crq->link_state_indication.logical_link_state;
+ if (adapter->phys_link_state && adapter->logical_link_state)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
break;
case CHANGE_MAC_ADDR_RSP:
netdev_dbg(netdev, "Got MAC address change Response\n");
@@ -4852,8 +4856,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->init_done);
adapter->resetting = false;
- adapter->mac_change_pending = false;
-
do {
rc = init_crq_queue(adapter);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index cffdac372a33..dcf2eb6d9290 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -969,7 +969,6 @@ struct ibmvnic_tunables {
u64 rx_entries;
u64 tx_entries;
u64 mtu;
- struct sockaddr mac;
};
struct ibmvnic_adapter {
@@ -1091,7 +1090,6 @@ struct ibmvnic_adapter {
bool resetting;
bool napi_enabled, from_passive_init;
- bool mac_change_pending;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 07e254fc96ef..409b69fd4374 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2750,7 +2750,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
mac_addr = of_get_mac_address(pnp);
if (!IS_ERR(mac_addr))
- memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(ppd.mac_addr, mac_addr);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8186135883ed..e758650b2c26 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4565,7 +4565,7 @@ static int mvneta_probe(struct platform_device *pdev)
dt_mac_addr = of_get_mac_address(dn);
if (!IS_ERR(dt_mac_addr)) {
mac_from = "device tree";
- memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, dt_mac_addr);
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 56d43d9b43ef..d38952eb7aa9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5058,8 +5058,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
- if (mvpp22_rss_is_supported())
+ if (mvpp22_rss_is_supported()) {
dev->hw_features |= NETIF_F_RXHASH;
+ dev->features |= NETIF_F_NTUPLE;
+ }
if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9d070cca3e9e..5adf307fbbfd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4805,7 +4805,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
*/
iap = of_get_mac_address(hw->pdev->dev.of_node);
if (!IS_ERR(iap))
- memcpy(dev->dev_addr, iap, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, iap);
else
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 5aac97847721..23883d1fa22f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -291,6 +291,9 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+ if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
+ MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+
MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index b6b3ff0fe17f..7ccb950aa7d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -22,7 +22,6 @@ config MLXSW_CORE_HWMON
config MLXSW_CORE_THERMAL
bool "Thermal zone support for Mellanox Technologies Switch ASICs"
depends on MLXSW_CORE && THERMAL
- depends on !(MLXSW_CORE=y && THERMAL=m)
default y
---help---
Say Y here if you want to automatically control fans speed according
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index b44172a901ed..ba4fdf1b0dea 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -426,7 +426,7 @@ static void ks8851_init_mac(struct ks8851_net *ks)
mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
if (!IS_ERR(mac_addr)) {
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, mac_addr);
ks8851_write_mac_addr(dev);
return;
}
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index dc76b0d15234..e5c8412c08c1 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1328,7 +1328,7 @@ static int ks8851_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
mac = of_get_mac_address(pdev->dev.of_node);
if (!IS_ERR(mac))
- memcpy(ks->mac_addr, mac, ETH_ALEN);
+ ether_addr_copy(ks->mac_addr, mac);
} else {
struct ks8851_mll_platform_data *pdata;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index da138edddd32..fec604c4c0d3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1369,7 +1369,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
const char *macaddr = of_get_mac_address(np);
if (!IS_ERR(macaddr))
- memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, macaddr);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 549be1c76a89..2e20334b76a1 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6992,8 +6992,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
- PCI_DEVID(pdev->bus->number, pdev->devfn));
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7c4e282242d5..6354f19a31eb 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3193,7 +3193,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
- memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(pdata->mac_addr, mac_addr);
pdata->no_ether_link =
of_property_read_bool(np, "renesas,no-ether-link");
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 70cce63a6081..696037d5ac3d 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -735,6 +735,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
sp = netdev_priv(dev);
/* Make private data page aligned */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5b3b06a0a3bf..d466e33635b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -15,7 +15,7 @@
* Adopted from dwmac-sti.c
*/
-#include <linux/mfd/syscon.h>
+#include <linux/mfd/altera-sysmgr.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
@@ -114,7 +114,8 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
dwmac->interface = of_get_phy_mode(np);
- sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+ sys_mgr_base_addr =
+ altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
if (IS_ERR(sys_mgr_base_addr)) {
dev_info(dev, "No sysmgr-syscon node found\n");
return PTR_ERR(sys_mgr_base_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 195669f550f0..ba124a4da793 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1015,6 +1015,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 26db6aa002d1..7cbc01f316fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -208,7 +208,7 @@ static int quark_default_data(struct pci_dev *pdev,
ret = 1;
}
- plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat->bus_id = pci_dev_id(pdev);
plat->phy_addr = ret;
plat->interface = PHY_INTERFACE_MODE_RMII;
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index c3f53a40b48f..ed12e1e5df2f 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -19,4 +19,4 @@ ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtoo
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
-keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b18eeb05b993..634fc484a0b3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2233,7 +2233,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
no_phy_slave:
mac_addr = of_get_mac_address(slave_node);
if (!IS_ERR(mac_addr)) {
- memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(slave_data->mac_addr, mac_addr);
} else {
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 997475c209c0..47c45152132e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -361,7 +361,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
temac_do_set_mac_address(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 691170753563..6886270da695 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1167,7 +1167,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (!IS_ERR(mac_address)) {
/* Set the MAC address. */
- memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, mac_address);
} else {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index ed6623a9801e..319db3ece263 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -31,14 +31,15 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
@@ -1497,6 +1498,15 @@ static struct platform_driver ixp4xx_eth_driver = {
static int __init eth_init_module(void)
{
int err;
+
+ /*
+ * FIXME: we bail out on device tree boot, but this really needs
+ * to be fixed in a nicer way: this registers the MDIO bus before
+ * the driver infrastructure has even matched a device; we should
+ * only probe detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if ((err = ixp4xx_mdio_register()))
return err;
return platform_driver_register(&ixp4xx_eth_driver);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b2ff903a9cb6..b188fce3f641 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -53,6 +53,7 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/ieee802154.h>
+#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
index 6fa29ea8e2a3..6644762ff2ab 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -33,7 +33,7 @@
#define ETH_PLL_CTL7 0x60
#define ETH_PHY_CNTL0 0x80
-#define EPHY_G12A_ID 0x33000180
+#define EPHY_G12A_ID 0x33010180
#define ETH_PHY_CNTL1 0x84
#define PHY_CNTL1_ST_MODE GENMASK(2, 0)
#define PHY_CNTL1_ST_PHYADD GENMASK(7, 3)
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 761ce3b1e7bd..a669945eb829 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -217,12 +217,12 @@ static int rtl8211e_config_init(struct phy_device *phydev)
if (oldpage < 0)
goto err_restore_page;
- ret = phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
if (ret)
goto err_restore_page;
- ret = phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
- val);
+ ret = __phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
+ val);
err_restore_page:
return phy_restore_page(phydev, oldpage, ret);
@@ -275,6 +275,8 @@ static struct phy_driver realtek_drvs[] = {
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
@@ -284,12 +286,16 @@ static struct phy_driver realtek_drvs[] = {
.write_mmd = &genphy_write_mmd_unsupported,
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
@@ -297,6 +303,8 @@ static struct phy_driver realtek_drvs[] = {
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
@@ -305,6 +313,8 @@ static struct phy_driver realtek_drvs[] = {
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 5c60dc60a8e6..46a05b6540b8 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 04964937a3af..b7a49ae6b327 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -95,7 +95,7 @@ mt76_eeprom_override(struct mt76_dev *dev)
mac = of_get_mac_address(np);
if (!IS_ERR(mac))
- memcpy(dev->macaddr, mac, ETH_ALEN);
+ ether_addr_copy(dev->macaddr, mac);
#endif
if (!is_valid_ether_addr(dev->macaddr)) {
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index f3d753d3169c..2030805aa216 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -756,6 +756,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
return &guid_null;
}
+static void reap_victim(struct nd_mapping *nd_mapping,
+ struct nd_label_ent *victim)
+{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ u32 slot = to_slot(ndd, victim->label);
+
+ dev_dbg(ndd->dev, "free: %d\n", slot);
+ nd_label_free_slot(ndd, slot);
+ victim->label = NULL;
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -763,9 +774,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
+ struct nd_label_ent *label_ent;
struct nd_label_id label_id;
struct resource *res;
unsigned long *free;
@@ -834,18 +845,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
- if (memcmp(nspm->uuid, label_ent->label->uuid,
- NSLABEL_UUID_LEN) != 0)
- continue;
- victim = label_ent;
- list_move_tail(&victim->list, &nd_mapping->labels);
- break;
- }
- if (victim) {
- dev_dbg(ndd->dev, "free: %d\n", slot);
- slot = to_slot(ndd, victim->label);
- nd_label_free_slot(ndd, slot);
- victim->label = NULL;
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
+ || memcmp(nspm->uuid, label_ent->label->uuid,
+ NSLABEL_UUID_LEN) == 0)
+ reap_victim(nd_mapping, label_ent);
}
/* update index */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index f293556cbbf6..d0214644e334 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1247,12 +1247,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_label_ent *label_ent;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
new_label_id.id);
+
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+ struct nd_label_id label_id;
+
+ if (!nd_label)
+ continue;
+ nd_label_gen_id(&label_id, nd_label->uuid,
+ __le32_to_cpu(nd_label->flags));
+ if (strcmp(old_label_id.id, label_id.id) == 0)
+ set_bit(ND_LABEL_REAP, &label_ent->flags);
+ }
+ mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
out:
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index a5ac3b240293..191d62af0e51 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -113,8 +113,12 @@ struct nd_percpu_lane {
spinlock_t lock;
};
+enum nd_label_flags {
+ ND_LABEL_REAP,
+};
struct nd_label_ent {
struct list_head list;
+ unsigned long flags;
struct nd_namespace_label *label;
};
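Taken together, the three nvdimm hunks above introduce a small mark-and-sweep handshake: namespace_update_uuid() marks stale label entries with set_bit(ND_LABEL_REAP, &label_ent->flags) under the mapping lock, and the next __pmem_label_update() consumes the mark with test_and_clear_bit() and frees the slot via reap_victim(). A stripped-down sketch of that handshake, with hypothetical names:

#include <linux/bitops.h>

enum my_entry_flags {
	MY_ENTRY_REAP,				/* plays the role of ND_LABEL_REAP */
};

struct my_entry {
	unsigned long flags;
};

static void mark_for_reap(struct my_entry *e)
{
	set_bit(MY_ENTRY_REAP, &e->flags);	/* producer: atomic set */
}

static bool should_reap(struct my_entry *e)
{
	/* consumer: true exactly once per mark, then the bit reads clear */
	return test_and_clear_bit(MY_ENTRY_REAP, &e->flags);
}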
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a6644a2c3ef7..7da80f375315 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1257,10 +1257,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
+ effects |= nvme_known_admin_effects(opcode);
if (ctrl->effects)
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- else
- effects = nvme_known_admin_effects(opcode);
/*
* For simplicity, IO to all namespaces is quiesced even if the command
@@ -2342,20 +2341,35 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
-static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
+ struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
- int count = 0;
- struct nvme_ctrl *ctrl;
+ struct nvme_ctrl *tmp;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DEAD)
+ continue;
+
+ if (tmp->cntlid == ctrl->cntlid) {
+ dev_err(ctrl->device,
+ "Duplicate cntlid %u with %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device));
+ return false;
+ }
- mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
- if (ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DEAD)
- count++;
+ if ((id->cmic & (1 << 1)) ||
+ (ctrl->opts && ctrl->opts->discovery_nqn))
+ continue;
+
+ dev_err(ctrl->device,
+ "Subsystem does not support multiple controllers\n");
+ return false;
}
- mutex_unlock(&subsys->lock);
- return count;
+ return true;
}
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
@@ -2395,22 +2409,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
mutex_lock(&nvme_subsystems_lock);
found = __nvme_find_get_subsystem(subsys->subnqn);
if (found) {
- /*
- * Verify that the subsystem actually supports multiple
- * controllers, else bail out.
- */
- if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
- nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
- dev_err(ctrl->device,
- "ignoring ctrl due to duplicate subnqn (%s).\n",
- found->subnqn);
- nvme_put_subsystem(found);
- ret = -EINVAL;
- goto out_unlock;
- }
-
__nvme_release_subsystem(subsys);
subsys = found;
+
+ if (!nvme_validate_cntlid(subsys, ctrl, id)) {
+ ret = -EINVAL;
+ goto out_put_subsystem;
+ }
} else {
ret = device_add(&subsys->dev);
if (ret) {
@@ -2422,23 +2427,20 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems);
}
- ctrl->subsys = subsys;
- mutex_unlock(&nvme_subsystems_lock);
-
if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
dev_name(ctrl->device))) {
dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n");
- /* the transport driver will eventually put the subsystem */
- return -EINVAL;
+ goto out_put_subsystem;
}
- mutex_lock(&subsys->lock);
+ ctrl->subsys = subsys;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- mutex_unlock(&subsys->lock);
-
+ mutex_unlock(&nvme_subsystems_lock);
return 0;
+out_put_subsystem:
+ nvme_put_subsystem(subsys);
out_unlock:
mutex_unlock(&nvme_subsystems_lock);
put_device(&subsys->dev);
@@ -3605,19 +3607,18 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
u32 aer_notice_type = (result & 0xff00) >> 8;
+ trace_nvme_async_event(ctrl, aer_notice_type);
+
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
- trace_nvme_async_event(ctrl, aer_notice_type);
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- trace_nvme_async_event(ctrl, aer_notice_type);
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
- trace_nvme_async_event(ctrl, aer_notice_type);
if (!ctrl->ana_log_buf)
break;
queue_work(nvme_wq, &ctrl->ana_work);
@@ -3696,10 +3697,10 @@ static void nvme_free_ctrl(struct device *dev)
__free_page(ctrl->discard_page);
if (subsys) {
- mutex_lock(&subsys->lock);
+ mutex_lock(&nvme_subsystems_lock);
list_del(&ctrl->subsys_entry);
- mutex_unlock(&subsys->lock);
sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
+ mutex_unlock(&nvme_subsystems_lock);
}
ctrl->ops->free_ctrl(ctrl);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 592d1e61ef7e..5838f7cd53ac 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -978,7 +978,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_DISABLE_SQFLOW)
static struct nvme_ctrl *
-nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+nvmf_create_ctrl(struct device *dev, const char *buf)
{
struct nvmf_ctrl_options *opts;
struct nvmf_transport_ops *ops;
@@ -1073,7 +1073,7 @@ static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
goto out_unlock;
}
- ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
+ ctrl = nvmf_create_ctrl(nvmf_device, buf);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
goto out_unlock;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9544eb60f725..dd8169bbf0d2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -202,7 +202,7 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-
+static struct workqueue_struct *nvme_fc_wq;
/*
* These items are short-term. They will eventually be moved into
@@ -2054,7 +2054,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !schedule_work(&ctrl->err_work)) {
+ if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
atomic_set(&ctrl->err_work_active, 0);
WARN_ON(1);
}
@@ -3399,6 +3399,10 @@ static int __init nvme_fc_init_module(void)
{
int ret;
+ nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+ if (!nvme_fc_wq)
+ return -ENOMEM;
+
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3416,7 +3420,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- return ret;
+ goto out_destroy_wq;
}
/*
@@ -3440,6 +3444,9 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
+out_destroy_wq:
+ destroy_workqueue(nvme_fc_wq);
+
return ret;
}
@@ -3456,6 +3463,7 @@ static void __exit nvme_fc_exit_module(void)
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
+ destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
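The point of the private workqueue above is that error-recovery work sits on the I/O path: a WQ_MEM_RECLAIM workqueue keeps a rescuer thread, so queued work can still run when memory pressure prevents new worker threads from being created, whereas schedule_work() uses the system workqueue with no such guarantee. A self-contained sketch of the same lifetime pairing (names are illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_err_work;

static void example_err_handler(struct work_struct *work)
{
	/* recover the controller here */
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_err_work, example_err_handler);
	/* error paths then use queue_work(example_wq, &example_err_work)
	 * instead of schedule_work(&example_err_work)
	 */
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");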
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 949e29e1d782..4f20a10b39d3 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -977,6 +977,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
geo->ext = ns->ext;
+ geo->mdts = ns->ctrl->max_hw_sectors;
dev->q = q;
memcpy(dev->name, disk_name, DISK_NAME_LEN);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5c9429d41120..499acf07d61a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
} else if (ns->head->disk) {
sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
- ctrl->cntlid, ns->head->instance);
+ ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
} else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3e4fb891a95a..2a8708c9ac18 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1296,6 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
switch (dev->ctrl.state) {
case NVME_CTRL_DELETING:
shutdown = true;
+ /* fall through */
case NVME_CTRL_CONNECTING:
case NVME_CTRL_RESETTING:
dev_warn_ratelimited(dev->ctrl.device,
@@ -2280,8 +2281,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
return ret;
}
dev->ctrl.tagset = &dev->tagset;
-
- nvme_dbbuf_set(dev);
} else {
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
@@ -2289,6 +2288,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_free_queues(dev, dev->online_queues);
}
+ nvme_dbbuf_set(dev);
return 0;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e1824c2e0a1c..f383146e7d0f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -697,15 +697,6 @@ out_free_queues:
return ret;
}
-static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
- struct blk_mq_tag_set *set)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
- blk_mq_free_tag_set(set);
- nvme_rdma_dev_put(ctrl->device);
-}
-
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
bool admin)
{
@@ -744,24 +735,9 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
ret = blk_mq_alloc_tag_set(set);
if (ret)
- goto out;
-
- /*
- * We need a reference on the device as long as the tag_set is alive,
- * as the MRs in the request structures need a valid ib_device.
- */
- ret = nvme_rdma_dev_get(ctrl->device);
- if (!ret) {
- ret = -EINVAL;
- goto out_free_tagset;
- }
+ return ERR_PTR(ret);
return set;
-
-out_free_tagset:
- blk_mq_free_tag_set(set);
-out:
- return ERR_PTR(ret);
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -769,7 +745,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -847,7 +823,7 @@ out_cleanup_queue:
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -862,7 +838,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
}
@@ -903,7 +879,7 @@ out_cleanup_connect_q:
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 97d3c77365b8..e71502d141ed 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -167,6 +167,7 @@ TRACE_EVENT(nvme_async_event,
aer_name(NVME_AER_NOTICE_NS_CHANGED),
aer_name(NVME_AER_NOTICE_ANA),
aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
aer_name(NVME_AER_ERROR),
aer_name(NVME_AER_SMART),
aer_name(NVME_AER_CSS),
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 490c8fcaec80..5893543918c8 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -16,6 +16,8 @@ struct zynqmp_nvmem_data {
struct nvmem_device *nvmem;
};
+static const struct zynqmp_eemi_ops *eemi_ops;
+
static int zynqmp_nvmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -23,9 +25,7 @@ static int zynqmp_nvmem_read(void *context, unsigned int offset,
int idcode, version;
struct zynqmp_nvmem_data *priv = context;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
-
- if (!eemi_ops || !eemi_ops->get_chipid)
+ if (!eemi_ops->get_chipid)
return -ENXIO;
ret = eemi_ops->get_chipid(&idcode, &version);
@@ -61,6 +61,10 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
priv->dev = dev;
econfig.dev = dev;
econfig.reg_read = zynqmp_nvmem_read;
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 9649cd53e955..6f1be80e8c4e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -52,39 +52,25 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
static const void *of_get_mac_addr_nvmem(struct device_node *np)
{
int ret;
- u8 mac[ETH_ALEN];
- struct property *pp;
+ const void *mac;
+ u8 nvmem_mac[ETH_ALEN];
struct platform_device *pdev = of_find_device_by_node(np);
if (!pdev)
return ERR_PTR(-ENODEV);
- ret = nvmem_get_mac_address(&pdev->dev, &mac);
- if (ret)
+ ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac);
+ if (ret) {
+ put_device(&pdev->dev);
return ERR_PTR(ret);
-
- pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
- if (!pp)
- return ERR_PTR(-ENOMEM);
-
- pp->name = "nvmem-mac-address";
- pp->length = ETH_ALEN;
- pp->value = devm_kmemdup(&pdev->dev, mac, ETH_ALEN, GFP_KERNEL);
- if (!pp->value) {
- ret = -ENOMEM;
- goto free;
}
- ret = of_add_property(np, pp);
- if (ret)
- goto free;
-
- return pp->value;
-free:
- devm_kfree(&pdev->dev, pp->value);
- devm_kfree(&pdev->dev, pp);
+ mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL);
+ put_device(&pdev->dev);
+ if (!mac)
+ return ERR_PTR(-ENOMEM);
- return ERR_PTR(ret);
+ return mac;
}
/**
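Besides dropping the of_add_property() dance, the rewritten helper above now balances the reference taken by of_find_device_by_node() (which returns the device with a reference held) by calling put_device() on every exit path, including the error returns. Condensed, the pattern looks like this (hypothetical helper name, same calls as in the hunk):

static const void *lookup_mac_from_nvmem(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	u8 buf[ETH_ALEN];
	const void *mac;
	int ret;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	ret = nvmem_get_mac_address(&pdev->dev, buf);
	if (ret) {
		put_device(&pdev->dev);		/* drop the lookup's reference */
		return ERR_PTR(ret);
	}

	mac = devm_kmemdup(&pdev->dev, buf, ETH_ALEN, GFP_KERNEL);
	put_device(&pdev->dev);
	return mac ? mac : ERR_PTR(-ENOMEM);
}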
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 657d642fcc67..28cdd8c0213a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -10,10 +10,10 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
-obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_ACPI) += pci-acpi.o
endif
+obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5cb40b2518f9..495059d923f7 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -23,7 +23,7 @@ void pci_add_resource_offset(struct list_head *resources, struct resource *res,
entry = resource_list_create_entry(res, 0);
if (!entry) {
- printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
+ pr_err("PCI: can't add host bridge window %pR\n", res);
return;
}
@@ -288,8 +288,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
res->end = end;
res->flags &= ~IORESOURCE_UNSET;
orig_res.flags &= ~IORESOURCE_UNSET;
- pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n",
- &orig_res, res);
+ pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);
return true;
}
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 6ea74b1c0d94..a6ce1ee51b4c 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -103,15 +103,32 @@ config PCIE_SPEAR13XX
Say Y here if you want PCIe support on SPEAr13XX SoCs.
config PCI_KEYSTONE
- bool "TI Keystone PCIe controller"
- depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
+ bool
+
+config PCI_KEYSTONE_HOST
+ bool "PCI Keystone Host Mode"
+ depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_KEYSTONE
+ default y
help
- Say Y here if you want to enable PCI controller support on Keystone
- SoCs. The PCI controller on Keystone is based on DesignWare hardware
- and therefore the driver re-uses the DesignWare core functions to
- implement the driver.
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in host mode. The PCI controller on Keystone is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+ bool "PCI Keystone Endpoint Mode"
+ depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in endpoint mode. The PCI controller on Keystone is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index b5f3b83cc2b3..b085dfd4fab7 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# depending on whether ACPI, the DT driver, or both are enabled.
ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index ae84a69ae63a..419451efd58c 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -247,6 +247,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
@@ -406,7 +407,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
return &dra7xx_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
.get_features = dra7xx_pcie_get_features,
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 3d627f94a166..9b5cb5b70389 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -52,6 +52,7 @@ enum imx6_pcie_variants {
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -89,9 +90,8 @@ struct imx6_pcie {
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
-#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
-#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP 0x50
@@ -104,34 +104,29 @@ struct imx6_pcie {
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
-#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
-#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
-#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
-#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
-#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
-#define PCIE_PHY_CTRL_DATA_LOC 0
-#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
-#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
-#define PCIE_PHY_CTRL_WR_LOC 18
-#define PCIE_PHY_CTRL_RD_LOC 19
+#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT BIT(17)
+#define PCIE_PHY_CTRL_WR BIT(18)
+#define PCIE_PHY_CTRL_RD BIT(19)
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
-#define PCIE_PHY_STAT_ACK_LOC 16
+#define PCIE_PHY_STAT_ACK BIT(16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
-#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_EN BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
-#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
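The register definitions in this hunk move from open-coded shifts to the <linux/bits.h> and <linux/bitfield.h> helpers: BIT(n) is (1UL << n), GENMASK(h, l) builds a contiguous mask, and FIELD_PREP() shifts a value into that mask (FIELD_GET() extracts it again). A quick equivalence sketch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* PCIE_PHY_CTRL_DATA(x) == FIELD_PREP(GENMASK(15, 0), (x)) == ((x) & 0xffff) << 0 */
/* PCIE_PHY_CTRL_CAP_ADR == BIT(16)                         == 0x00010000          */

static inline u16 phy_ctrl_data_get(u32 reg)
{
	return FIELD_GET(GENMASK(15, 0), reg);	/* matching extractor */
}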
@@ -154,19 +149,19 @@ struct imx6_pcie {
#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
#define PHY_RX_OVRD_IN_LO 0x1005
-#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
-#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
- u32 val;
+ bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+ PCIE_PHY_STAT_ACK;
wait_counter++;
if (val == exp_val)
@@ -184,27 +179,27 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
u32 val;
int ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx6_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
struct dw_pcie *pci = imx6_pcie->pci;
- u32 val, phy_ctl;
+ u32 phy_ctl;
int ret;
ret = pcie_phy_wait_ack(imx6_pcie, addr);
@@ -212,23 +207,22 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
return ret;
/* assert Read signal */
- phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- *data = val & 0xffff;
+ *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx6_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
struct dw_pcie *pci = imx6_pcie->pci;
u32 var;
@@ -240,41 +234,41 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
if (ret)
return ret;
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* capture data */
- var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
/* deassert cap data */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
if (ret)
return ret;
/* assert wr signal */
- var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ var = PCIE_PHY_CTRL_WR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
/* deassert wr signal */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
if (ret)
return ret;
@@ -285,7 +279,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
- u32 tmp;
+ u16 tmp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return;
@@ -455,7 +449,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
* reset time is too short, cannot meet the requirement.
* add one ~10us delay here.
*/
- udelay(10);
+ usleep_range(10, 100);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
@@ -488,20 +482,14 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
u32 val;
- unsigned int retries;
struct device *dev = imx6_pcie->pci->dev;
- for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
-
- if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
- return;
-
- usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX);
- }
-
- dev_err(dev, "PCIe PLL lock timeout\n");
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
}
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
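Worth spelling out the timeout arithmetic in the hunk above: PHY_PLL_LOCK_WAIT_TIMEOUT = 2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX = 2000 * 200 us = 400,000 us (400 ms), which bounds the same worst case as the old loop of 2000 iterations sleeping up to 200 us each. regmap_read_poll_timeout() re-reads IOMUXC_GPR22 into val, sleeping up to PHY_PLL_LOCK_WAIT_USLEEP_MAX microseconds between reads, until the PLL-locked bit is set or the timeout expires, and returns 0 on success or -ETIMEDOUT (or a read error) otherwise, which is what the dev_err() above reports.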
@@ -687,7 +675,7 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
int mult, div;
- u32 val;
+ u16 val;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return 0;
@@ -730,21 +718,6 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
-{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
- return -ETIMEDOUT;
-}
-
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -761,7 +734,7 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
}
dev_err(dev, "Speed change timeout\n");
- return -EINVAL;
+ return -ETIMEDOUT;
}
static void imx6_pcie_ltssm_enable(struct device *dev)
@@ -803,7 +776,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
@@ -841,7 +814,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
}
/* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = dw_pcie_wait_for_link(pci);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -856,8 +829,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
err_reset_phy:
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
@@ -993,17 +966,11 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
}
}
-static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
-{
- return (imx6_pcie->drvdata->variant == IMX7D ||
- imx6_pcie->drvdata->variant == IMX6SX);
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
@@ -1019,7 +986,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct pcie_port *pp = &imx6_pcie->pci->pp;
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_assert_core_reset(imx6_pcie);
@@ -1249,7 +1216,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1258,6 +1226,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX7D] = {
.variant = IMX7D,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX8MQ] = {
.variant = IMX8MQ,
@@ -1279,6 +1248,7 @@ static struct platform_driver imx6_pcie_driver = {
.of_match_table = imx6_pcie_of_match,
.suppress_bind_attrs = true,
.pm = &imx6_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 14f2b0b4ed5e..af677254a072 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -18,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
@@ -26,6 +28,7 @@
#include <linux/resource.h>
#include <linux/signal.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define PCIE_VENDORID_MASK 0xffff
@@ -44,28 +47,34 @@
#define CFG_TYPE1 BIT(24)
#define OB_SIZE 0x030
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
#define OB_ENABLEN BIT(0)
#define OB_WIN_SIZE 8 /* 8MB */
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET 0x64
+#define PCIE_EP_IRQ_CLR 0x68
+#define INT_ENABLE BIT(0)
+
/* IRQ register defines */
#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
+#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET 4
+#define IRQ_STATUS(n) (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define INTx_EN BIT(0)
+
#define ERR_IRQ_STATUS 0x1c4
#define ERR_IRQ_ENABLE_SET 0x1c8
#define ERR_AER BIT(5) /* ECRC error */
+#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
#define ERR_CORR BIT(3) /* Correctable error */
#define ERR_NONFATAL BIT(2) /* Non-fatal error */
@@ -74,25 +83,45 @@
#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define MAX_MSI_HOST_IRQS 8
/* PCIE controller device IDs */
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a
#define PCIE_RC_K2G 0xb00b
+#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
+
+#define EP 0x0
+#define LEG_EP 0x1
+#define RC 0x2
+
+#define EXP_CAP_ID_OFFSET 0x70
+
+#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK 0x3
+#define AM654_WIN_SIZE SZ_64K
+
+#define APP_ADDR_SPACE_0 (16 * SZ_1K)
+
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ unsigned int version;
+};
+
struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int num_legacy_host_irqs;
int legacy_host_irqs[PCI_NUM_INTX];
struct device_node *legacy_intc_np;
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int msi_host_irq;
int num_lanes;
u32 num_viewport;
struct phy **phy;
@@ -101,28 +130,12 @@ struct keystone_pcie {
struct irq_domain *legacy_irq_domain;
struct device_node *np;
- int error_irq;
-
/* Application register space */
void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
+ bool is_am6;
};
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
return readl(ks_pcie->va_app_base + offset);
@@ -134,81 +147,114 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
writel(val, ks_pcie->va_app_base + offset);
}
-static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ u32 reg_offset;
+ u32 bit_pos;
- pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
-static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- u32 reg_offset, bit_pos;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
+ u64 msi_target;
pci = to_dw_pcie_from_pp(pp);
ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+ msi_target = ks_pcie->app.start + MSI_IRQ;
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ return -EINVAL;
+}
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_unmask(struct irq_data *data)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
+static struct irq_chip ks_pcie_msi_irq_chip = {
+ .name = "KEYSTONE-PCI-MSI",
+ .irq_ack = ks_pcie_msi_irq_ack,
+ .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+ .irq_set_affinity = ks_pcie_msi_set_affinity,
+ .irq_mask = ks_pcie_msi_mask,
+ .irq_unmask = ks_pcie_msi_unmask,
+};
+
static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
-static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
-{
- int i;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
-}
-
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
int offset)
{
@@ -217,7 +263,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
u32 pending;
int virq;
- pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
@@ -229,6 +275,14 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
+{
+ return 0;
+}
+
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
@@ -255,10 +309,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
if (reg & ERR_CORR)
dev_dbg(dev, "Correctable Error\n");
- if (reg & ERR_AXI)
+ if (!ks_pcie->is_am6 && (reg & ERR_AXI))
dev_err(dev, "AXI tag lookup fatal Error\n");
- if (reg & ERR_AER)
+ if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
dev_err(dev, "ECRC Error\n");
ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
@@ -356,6 +410,9 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
ks_pcie_clear_dbi_mode(ks_pcie);
+ if (ks_pcie->is_am6)
+ return;
+
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -445,68 +502,33 @@ static int ks_pcie_link_up(struct dw_pcie *pci)
return (val == PORT_LOGIC_LTSSM_STATE_L0);
}
-static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+static void ks_pcie_stop_link(struct dw_pcie *pci)
{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 val;
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
-/**
- * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+static int ks_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
+ u32 val;
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
+ if (dw_pcie_link_up(pci)) {
+ dev_dbg(dev, "link is already up\n");
+ return 0;
}
- return dw_pcie_host_init(pp);
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+ return 0;
}
static void ks_pcie_quirk(struct pci_dev *dev)
@@ -552,34 +574,16 @@ static void ks_pcie_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
-static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_info(dev, "Link already up\n");
- return 0;
- }
-
- ks_pcie_initiate_link_train(ks_pcie);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_err(dev, "phy link never came up\n");
- return -ETIMEDOUT;
-}
-
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int irq = desc->irq_data.hwirq;
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
- u32 offset = irq - ks_pcie->msi_host_irqs[0];
+ u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 vector, virq, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -589,7 +593,23 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_msi_irq(ks_pcie, offset);
+
+ reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+ /*
+ * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+ * shows 1, 9, 17, 25 and so forth
+ */
+ for (pos = 0; pos < 4; pos++) {
+ if (!(reg & BIT(pos)))
+ continue;
+
+ vector = offset + (pos << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
+ virq);
+ generic_handle_irq(virq);
+ }
+
chained_irq_exit(chip, desc);
}
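The comment in the handler above describes a stride-of-8 layout: MSI status register n, bit p corresponds to MSI vector n + 8 * p, and the ack/mask/unmask paths invert that mapping with reg_offset = vector % 8 and bit_pos = vector >> 3. For instance, vector 17 lives in MSI1 (17 % 8 = 1) at bit 2 (17 >> 3 = 2). A small sketch of the two directions (illustrative helpers, not part of the patch):

static inline unsigned int ks_msi_vector(unsigned int reg, unsigned int bit)
{
	return reg + (bit << 3);		/* e.g. MSI1, bit 2 -> vector 17 */
}

static inline void ks_msi_decompose(unsigned int vector,
				    unsigned int *reg, unsigned int *bit)
{
	*reg = vector % 8;			/* which MSI_IRQ_STATUS(n) register */
	*bit = vector >> 3;			/* bit inside that register */
}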
@@ -622,89 +642,119 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
- char *controller, int *num_irqs)
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
- int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pci->dev;
- struct device_node *np_pcie = dev->of_node, **np_temp;
-
- if (!strcmp(controller, "msi-interrupt-controller"))
- legacy = 0;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ struct irq_data *irq_data;
+ int irq_count, irq, ret, i;
- if (legacy) {
- np_temp = &ks_pcie->legacy_intc_np;
- max_host_irqs = PCI_NUM_INTX;
- host_irqs = &ks_pcie->legacy_host_irqs[0];
- } else {
- np_temp = &ks_pcie->msi_intc_np;
- max_host_irqs = MAX_MSI_HOST_IRQS;
- host_irqs = &ks_pcie->msi_host_irqs[0];
- }
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ return 0;
- /* interrupt controller is in a child node */
- *np_temp = of_get_child_by_name(np_pcie, controller);
- if (!(*np_temp)) {
- dev_err(dev, "Node for %s is absent\n", controller);
+ intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+ if (!intc_np) {
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "msi-interrupt-controller node is absent\n");
return -EINVAL;
}
- temp = of_irq_count(*np_temp);
- if (!temp) {
- dev_err(dev, "No IRQ entries in %s\n", controller);
- of_node_put(*np_temp);
- return -EINVAL;
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
}
- if (temp > max_host_irqs)
- dev_warn(dev, "Too many %s interrupts defined %u\n",
- (legacy ? "legacy" : "MSI"), temp);
-
- /*
- * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
- * 7 (MSI)
- */
- for (temp = 0; temp < max_host_irqs; temp++) {
- host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
- if (!host_irqs[temp])
- break;
- }
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
- of_node_put(*np_temp);
+ if (!ks_pcie->msi_host_irq) {
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->msi_host_irq = irq_data->hwirq;
+ }
- if (temp) {
- *num_irqs = temp;
- return 0;
+ irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+ ks_pcie);
}
- return -EINVAL;
+ of_node_put(intc_np);
+ return 0;
+
+err:
+ of_node_put(intc_np);
+ return ret;
}
-static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
- int i;
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int irq_count, irq, ret = 0, i;
+
+ intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!intc_np) {
+ /*
+ * Since legacy interrupts are modeled as edge-interrupts in
+ * AM6, keep it disabled for now.
+ */
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
- /* Legacy IRQ */
- for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_host_irqs[i] = irq;
+
+ irq_set_chained_handler_and_data(irq,
ks_pcie_legacy_irq_handler,
ks_pcie);
}
- ks_pcie_enable_legacy_irqs(ks_pcie);
- /* MSI IRQ */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
- ks_pcie_msi_irq_handler,
- ks_pcie);
- }
+ legacy_irq_domain =
+ irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops, NULL);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ ret = -EINVAL;
+ goto err;
}
+ ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
- if (ks_pcie->error_irq > 0)
- ks_pcie_enable_error_irq(ks_pcie);
+err:
+ of_node_put(intc_np);
+ return ret;
}
+#ifdef CONFIG_ARM
/*
* When a PCI device does not exist during config cycles, keystone host gets a
* bus error instead of returning 0xffffffff. This handler always returns 0
@@ -724,6 +774,7 @@ static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
+#endif
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
@@ -742,8 +793,10 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
if (ret)
return ret;
+ dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+ dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -754,11 +807,18 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
+ ret = ks_pcie_config_legacy_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ret = ks_pcie_config_msi_irq(ks_pcie);
+ if (ret)
+ return ret;
+
dw_pcie_setup_rc(pp);
- ks_pcie_establish_link(ks_pcie);
+ ks_pcie_stop_link(pci);
ks_pcie_setup_rc_app_regs(ks_pcie);
- ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -766,12 +826,17 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret < 0)
return ret;
+#ifdef CONFIG_ARM
/*
* PCIe access errors that result into OCP errors are caught by ARM as
* "External aborts"
*/
hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
+#endif
+
+ ks_pcie_start_link(pci);
+ dw_pcie_wait_for_link(pci);
return 0;
}
@@ -780,14 +845,15 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = {
.rd_other_conf = ks_pcie_rd_other_conf,
.wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
- .msi_set_irq = ks_pcie_msi_set_irq,
- .msi_clear_irq = ks_pcie_msi_clear_irq,
- .get_msi_addr = ks_pcie_get_msi_addr,
.msi_host_init = ks_pcie_msi_host_init,
- .msi_irq_ack = ks_pcie_msi_irq_ack,
.scan_bus = ks_pcie_v3_65_scan_bus,
};
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+ .host_init = ks_pcie_host_init,
+ .msi_host_init = ks_pcie_am654_msi_host_init,
+};
+
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
@@ -801,41 +867,17 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "legacy-interrupt-controller",
- &ks_pcie->num_legacy_host_irqs);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "msi-interrupt-controller",
- &ks_pcie->num_msi_host_irqs);
- if (ret)
- return ret;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
- /*
- * Index 0 is the platform interrupt for error interrupt
- * from RC. This is optional.
- */
- ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
- if (ks_pcie->error_irq <= 0)
- dev_info(dev, "no error IRQ defined\n");
- else {
- ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
- IRQF_SHARED, "pcie-error-irq", ks_pcie);
- if (ret < 0) {
- dev_err(dev, "failed to request error IRQ %d\n",
- ks_pcie->error_irq);
- return ret;
- }
- }
+ pp->va_cfg1_base = pp->va_cfg0_base;
- pp->ops = &ks_pcie_host_ops;
- ret = ks_pcie_dw_host_init(ks_pcie);
+ ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
@@ -844,18 +886,139 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
return 0;
}
-static const struct of_device_id ks_pcie_of_match[] = {
- {
- .type = "pci",
- .compatible = "ti,keystone-pcie",
- },
- { },
-};
+static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_read(base + reg, size, &val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+ return val;
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_write(base + reg, size, val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+}
static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .start_link = ks_pcie_start_link,
+ .stop_link = ks_pcie_stop_link,
.link_up = ks_pcie_link_up,
+ .read_dbi2 = ks_pcie_am654_read_dbi2,
+ .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ int flags;
+
+ ep->page_size = AM654_WIN_SIZE;
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ u8 int_pin;
+
+ int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+ if (int_pin == 0 || int_pin > 4)
+ return;
+
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+ INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+ mdelay(1);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+ INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[2] = SZ_1M,
+ .bar_fixed_size[3] = SZ_64K,
+ .bar_fixed_size[4] = 256,
+ .bar_fixed_size[5] = SZ_1M,
+ .align = SZ_1M,
};
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+ return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+ .ep_init = ks_pcie_am654_ep_init,
+ .raise_irq = ks_pcie_am654_raise_irq,
+ .get_features = &ks_pcie_am654_get_features,
+};
+
+static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = ks_pcie->pci;
+
+ ep = &pci->ep;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
int num_lanes = ks_pcie->num_lanes;
@@ -873,6 +1036,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
int num_lanes = ks_pcie->num_lanes;
for (i = 0; i < num_lanes; i++) {
+ ret = phy_reset(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
ret = phy_init(ks_pcie->phy[i]);
if (ret < 0)
goto err_phy;
@@ -895,20 +1062,161 @@ err_phy:
return ret;
}
+static int ks_pcie_set_mode(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+ val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+ enum dw_pcie_device_mode mode)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ mask = AM654_PCIE_DEV_TYPE_MASK;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ val = RC;
+ break;
+ case DW_PCIE_EP_TYPE:
+ val = EP;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
+{
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
+ if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= link_speed;
+ dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
+ val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
+ if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= link_speed;
+ dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
+ val);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ .host_ops = &ks_pcie_host_ops,
+ .version = 0x365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+ .host_ops = &ks_pcie_am654_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = 0x490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+ .ep_ops = &ks_pcie_am654_ep_ops,
+ .mode = DW_PCIE_EP_TYPE,
+ .version = 0x490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .data = &ks_pcie_rc_of_data,
+ .compatible = "ti,keystone-pcie",
+ },
+ {
+ .data = &ks_pcie_am654_rc_of_data,
+ .compatible = "ti,am654-pcie-rc",
+ },
+ {
+ .data = &ks_pcie_am654_ep_of_data,
+ .compatible = "ti,am654-pcie-ep",
+ },
+ { },
+};
+
static int __init ks_pcie_probe(struct platform_device *pdev)
{
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ const struct ks_pcie_of_data *data;
+ const struct of_device_id *match;
+ enum dw_pcie_device_mode mode;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
struct device_link **link;
+ struct gpio_desc *gpiod;
+ void __iomem *atu_base;
+ struct resource *res;
+ unsigned int version;
+ void __iomem *base;
u32 num_viewport;
struct phy **phy;
+ int link_speed;
u32 num_lanes;
char name[10];
int ret;
+ int irq;
int i;
+ match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
+ data = (struct ks_pcie_of_data *)match->data;
+ if (!data)
+ return -EINVAL;
+
+ version = data->version;
+ host_ops = data->host_ops;
+ ep_ops = data->ep_ops;
+ mode = data->mode;
+
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
@@ -917,12 +1225,38 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+ base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+ ks_pcie->is_am6 = true;
+
+ pci->dbi_base = base;
+ pci->dbi_base2 = base;
pci->dev = dev;
pci->ops = &ks_pcie_dw_pcie_ops;
+ pci->version = version;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "missing IRQ resource: %d\n", irq);
+ return irq;
+ }
- ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
- dev_err(dev, "unable to read *num-viewport* property\n");
+ dev_err(dev, "failed to request error IRQ %d\n",
+ irq);
return ret;
}
@@ -960,9 +1294,17 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ks_pcie->pci = pci;
ks_pcie->link = link;
ks_pcie->num_lanes = num_lanes;
- ks_pcie->num_viewport = num_viewport;
ks_pcie->phy = phy;
+ gpiod = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_link;
+ }
+
ret = ks_pcie_enable_phy(ks_pcie);
if (ret) {
dev_err(dev, "failed to enable phy\n");
@@ -977,9 +1319,79 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
- if (ret < 0)
- goto err_get_sync;
+ if (pci->version >= 0x480A) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ atu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(atu_base)) {
+ ret = PTR_ERR(atu_base);
+ goto err_get_sync;
+ }
+
+ pci->atu_base = atu_base;
+
+ ret = ks_pcie_am654_set_mode(dev, mode);
+ if (ret < 0)
+ goto err_get_sync;
+ } else {
+ ret = ks_pcie_set_mode(dev);
+ if (ret < 0)
+ goto err_get_sync;
+ }
+
+ link_speed = of_pci_get_max_link_speed(np);
+ if (link_speed < 0)
+ link_speed = 2;
+
+ ks_pcie_set_link_speed(pci, link_speed);
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ return ret;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+ * indicates PERST# should be deasserted after a minimum of 100us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ks_pcie->num_viewport = num_viewport;
+ pci->pp.ops = host_ops;
+ ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ pci->ep.ops = ep_ops;
+ ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+
+ ks_pcie_enable_error_irq(ks_pcie);
return 0;
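For context on the of_match_device()/match->data handling added to ks_pcie_probe() above, here is a minimal sketch of the usual per-compatible driver-data pattern, using of_device_get_match_data() as a shorthand for the explicit of_match_device() + ->data lookup done in the patch. All foo_* names are hypothetical and not part of this series.

/* Hypothetical driver: pick per-compatible configuration from OF match data. */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_of_data {
	unsigned int version;
	bool is_ep;
};

static const struct foo_of_data foo_rc_data = { .version = 0x365A, .is_ep = false };
static const struct foo_of_data foo_ep_data = { .version = 0x490A, .is_ep = true };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-rc", .data = &foo_rc_data },
	{ .compatible = "vendor,foo-ep", .data = &foo_ep_data },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the matched of_device_id entry. */
	const struct foo_of_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;

	dev_info(&pdev->dev, "version %#x, %s mode\n",
		 data->version, data->is_ep ? "endpoint" : "root complex");
	return 0;
}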
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index a42c9c3ae1cc..be61d96cc95e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -79,7 +79,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ce45bde29bf8..3a5fa26d5e56 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -201,6 +201,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp)
return -EINVAL;
}
+ of_node_put(msi_node);
return 0;
}
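Several hunks in this series (pci-layerscape.c above, and the uniphier, aardvark, mediatek, rockchip and iproc changes further down) add a missing of_node_put(). A small generic sketch of the refcount rule they follow: every node reference returned by an of_* lookup must be dropped once the caller is done with it, on error paths as well as on success.

#include <linux/of.h>

static int example_parse_msi_parent(struct device_node *np)
{
	struct device_node *msi_node;
	int ret = 0;

	/* of_parse_phandle() returns a node with an elevated refcount. */
	msi_node = of_parse_phandle(np, "msi-parent", 0);
	if (!msi_node)
		return -EINVAL;

	if (!of_device_is_available(msi_node))
		ret = -ENODEV;

	/* Drop the reference on every exit path, not only on success. */
	of_node_put(msi_node);
	return ret;
}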
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 000000000000..3ab58f0584a8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi {
+ void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct al_pcie_acpi *pcie = cfg->priv;
+ void __iomem *dbi_base = pcie->dbi_base;
+
+ if (bus->number == cfg->busr.start) {
+ /*
+ * The DW PCIe core doesn't filter out transactions to other
+ * devices/functions on the root bus num, so we do this here.
+ */
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+ else
+ return dbi_base + where;
+ }
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct al_pcie_acpi *al_pcie;
+ struct resource *res;
+ int ret;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+ root->segment);
+ return ret;
+ }
+
+ dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+ al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(al_pcie->dbi_base)) {
+ long err = PTR_ERR(al_pcie->dbi_base);
+
+ dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n",
+ res, err);
+ return err;
+ }
+
+ cfg->priv = al_pcie;
+
+ return 0;
+}
+
+struct pci_ecam_ops al_pcie_ops = {
+ .bus_shift = 20,
+ .init = al_pcie_init,
+ .pci_ops = {
+ .map_bus = al_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
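For reference, the bus_shift of 20 in al_pcie_ops above corresponds to the standard ECAM layout: 1 MiB of config space per bus, 32 KiB per device (8 functions), 4 KiB per function. A standalone illustration of the offset arithmetic, written as ordinary hosted C rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* Standard ECAM offset: bus << 20 | device << 15 | function << 12 | register. */
static uint64_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t reg)
{
	return ((uint64_t)bus << 20) | ((uint64_t)dev << 15) |
	       ((uint64_t)fn << 12) | reg;
}

int main(void)
{
	/* Bus 1, device 0, function 0, Vendor ID register: prints 0x100000. */
	printf("0x%llx\n", (unsigned long long)ecam_offset(1, 0, 0, 0x00));
	return 0;
}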
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index dba83abfe764..d00252bd8fae 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -444,7 +444,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 24f5a775ad34..2bf5a35c0570 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
u8 cap_id, next_cap_ptr;
u16 reg;
+ if (!cap_ptr)
+ return 0;
+
reg = dw_pcie_readw_dbi(pci, cap_ptr);
- next_cap_ptr = (reg & 0xff00) >> 8;
cap_id = (reg & 0x00ff);
- if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+ if (cap_id > PCI_CAP_ID_MAX)
return 0;
if (cap_id == cap)
return cap_ptr;
+ next_cap_ptr = (reg & 0xff00) >> 8;
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
@@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
next_cap_ptr = (reg & 0x00ff);
- if (!next_cap_ptr)
- return 0;
-
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
@@ -397,6 +397,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
+ unsigned int aligned_offset;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
@@ -422,13 +423,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+ aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+ msg_addr = ((u64)msg_addr_upper) << 32 |
+ (msg_addr_lower & ~aligned_offset);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
@@ -504,10 +507,32 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
pci_epc_mem_exit(epc);
}
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+ u32 header;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ while (pos) {
+ header = dw_pcie_readl_dbi(pci, pos);
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (!pos)
+ break;
+ }
+
+ return 0;
+}
+
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
+ int i;
int ret;
+ u32 reg;
void *addr;
+ unsigned int nbars;
+ unsigned int offset;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
@@ -517,10 +542,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
- if (pci->iatu_unroll_enabled && !pci->atu_base) {
- dev_err(dev, "atu_base is not populated\n");
- return -EINVAL;
- }
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
@@ -595,6 +616,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
dw_pcie_setup(pci);
return 0;
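To make the aligned_offset handling added to dw_pcie_ep_raise_msi_irq() above easier to follow, here is a small sketch that reproduces the arithmetic with made-up values; the helper name is illustrative only.

#include <linux/types.h>

/* Illustrative only: mirrors the offset math in dw_pcie_ep_raise_msi_irq(). */
static u64 example_msi_aligned_base(u32 msg_addr_lower, u32 msg_addr_upper,
				    size_t page_size, unsigned int *offset)
{
	*offset = msg_addr_lower & (page_size - 1);
	return ((u64)msg_addr_upper << 32) | (msg_addr_lower & ~*offset);
}

/*
 * Example: msg_addr = 0x00000000fee01004, page_size = 0x1000
 *   *offset       -> 0x004
 *   returned base -> 0xfee01000
 * The page-aligned base is what gets mapped through the outbound window;
 * the MSI write is then issued at ep->msi_mem + 0x004 so it still targets
 * 0xfee01004.
 */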
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 25087d3c9a82..77db32529319 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -126,18 +126,12 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
- if (pp->ops->get_msi_addr)
- msi_target = pp->ops->get_msi_addr(pp);
- else
- msi_target = (u64)pp->msi_data;
+ msi_target = (u64)pp->msi_data;
msg->address_lo = lower_32_bits(msi_target);
msg->address_hi = upper_32_bits(msi_target);
- if (pp->ops->get_msi_data)
- msg->data = pp->ops->get_msi_data(pp, d->hwirq);
- else
- msg->data = d->hwirq;
+ msg->data = d->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
(int)d->hwirq, msg->address_hi, msg->address_lo);
@@ -157,17 +151,13 @@ static void dw_pci_bottom_mask(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_clear_irq) {
- pp->ops->msi_clear_irq(pp, d->hwirq);
- } else {
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
- }
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -180,17 +170,13 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_set_irq) {
- pp->ops->msi_set_irq(pp, d->hwirq);
- } else {
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
- }
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -199,20 +185,12 @@ static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
- unsigned long flags;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- raw_spin_lock_irqsave(&pp->lock, flags);
-
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
-
- if (pp->ops->msi_irq_ack)
- pp->ops->msi_irq_ack(d->hwirq, pp);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -245,7 +223,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, bit + i,
- &dw_pci_msi_bottom_irq_chip,
+ pp->msi_irq_chip,
pp, handle_edge_irq,
NULL, NULL);
@@ -298,25 +276,31 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
void dw_pcie_free_msi(struct pcie_port *pp)
{
- irq_set_chained_handler(pp->msi_irq, NULL);
- irq_set_handler_data(pp->msi_irq, NULL);
+ if (pp->msi_irq) {
+ irq_set_chained_handler(pp->msi_irq, NULL);
+ irq_set_handler_data(pp->msi_irq, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
+
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct page *page;
u64 msi_target;
- page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ pp->msi_page = alloc_page(GFP_KERNEL);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
dev_err(dev, "Failed to map MSI data\n");
- __free_page(page);
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
return;
}
msi_target = (u64)pp->msi_data;
@@ -335,7 +319,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win, *tmp;
- struct pci_bus *bus, *child;
+ struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
int ret;
@@ -352,7 +336,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
dev_err(dev, "Missing *config* reg space\n");
}
- bridge = pci_alloc_host_bridge(0);
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -363,7 +347,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
- goto error;
+ return ret;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
@@ -407,8 +391,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_size(pp->cfg));
if (!pci->dbi_base) {
dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -419,8 +402,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(dev, "Error with ioremap in function\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -430,8 +412,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -439,7 +420,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
pci->num_viewport = 2;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
+ if (pci_msi_enabled()) {
/*
* If a specific SoC driver needs to change the
* default number of vectors, it needs to implement
@@ -454,14 +435,16 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->num_vectors == 0) {
dev_err(dev,
"Invalid number of vectors\n");
- goto error;
+ return -EINVAL;
}
}
if (!pp->ops->msi_host_init) {
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
ret = dw_pcie_allocate_domains(pp);
if (ret)
- goto error;
+ return ret;
if (pp->msi_irq)
irq_set_chained_handler_and_data(pp->msi_irq,
@@ -470,14 +453,14 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- goto error;
+ return ret;
}
}
if (pp->ops->host_init) {
ret = pp->ops->host_init(pp);
if (ret)
- goto error;
+ goto err_free_msi;
}
pp->root_bus_nr = pp->busn->start;
@@ -491,24 +474,25 @@ int dw_pcie_host_init(struct pcie_port *pp)
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ goto err_free_msi;
- bus = bridge->bus;
+ pp->root_bus = bridge->bus;
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_bus_size_bridges(pp->root_bus);
+ pci_bus_assign_resources(pp->root_bus);
- list_for_each_entry(child, &bus->children, node)
+ list_for_each_entry(child, &pp->root_bus->children, node)
pcie_bus_configure_settings(child);
- pci_bus_add_devices(bus);
+ pci_bus_add_devices(pp->root_bus);
return 0;
-error:
- pci_free_host_bridge(bridge);
+err_free_msi:
+ if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ dw_pcie_free_msi(pp);
return ret;
}
@@ -628,17 +612,6 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
- if (val == 0xffffffff)
- return 1;
-
- return 0;
-}
-
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val, ctrl, num_ctrls;
@@ -646,17 +619,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ if (!pp->ops->msi_host_init) {
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ pp->irq_mask[ctrl] = ~0;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, pp->irq_mask[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, ~0);
+ }
}
/* Setup RC BARs */
@@ -690,14 +665,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
- /* Get iATU unroll support */
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- dev_dbg(pci->dev, "iATU unroll: %s\n",
- pci->iatu_unroll_enabled ? "enabled" : "disabled");
-
- if (pci->iatu_unroll_enabled && !pci->atu_base)
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
-
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
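The mask/unmask/ack paths above all derive the controller index, bit and register offset from the hwirq the same way. Assuming the usual DesignWare constants of 32 vectors per controller and a 12-byte register block per controller (check MAX_MSI_IRQS_PER_CTRL and MSI_REG_CTRL_BLOCK_SIZE in pcie-designware.h for your tree), a short illustrative helper:

#define EXAMPLE_MSI_IRQS_PER_CTRL	32	/* MAX_MSI_IRQS_PER_CTRL in the driver */
#define EXAMPLE_MSI_CTRL_BLOCK_SIZE	12	/* MSI_REG_CTRL_BLOCK_SIZE in the driver */

static void example_msi_decode(unsigned long hwirq, unsigned int *ctrl,
			       unsigned int *bit, unsigned int *reg_off)
{
	*ctrl = hwirq / EXAMPLE_MSI_IRQS_PER_CTRL;	/* which MSI controller */
	*bit = hwirq % EXAMPLE_MSI_IRQS_PER_CTRL;	/* bit inside that controller */
	*reg_off = *ctrl * EXAMPLE_MSI_CTRL_BLOCK_SIZE;	/* added to PCIE_MSI_INTR0_* */
}

/* e.g. hwirq 37 -> ctrl 1, bit 5, reg_off 12. */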
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 932dbd0b34b6..b58fdcbc664b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -106,7 +106,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
return &dw_plat_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 31f6331ca46f..9d7c51c32b3b 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -14,12 +14,6 @@
#include "pcie-designware.h"
-/* PCIe Port Logic registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
-#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
-
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -89,6 +83,37 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
dev_err(pci->dev, "Write DBI address failed\n");
}
+u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops->read_dbi2)
+ return pci->ops->read_dbi2(pci, base, reg, size);
+
+ ret = dw_pcie_read(base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "read DBI address failed\n");
+
+ return val;
+}
+
+void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops->write_dbi2) {
+ pci->ops->write_dbi2(pci, base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "write DBI address failed\n");
+}
+
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
@@ -334,9 +359,20 @@ int dw_pcie_link_up(struct dw_pcie *pci)
if (pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
- return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
- (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
+ val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+
+static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xffffffff)
+ return 1;
+
+ return 0;
}
void dw_pcie_setup(struct dw_pcie *pci)
@@ -347,6 +383,16 @@ void dw_pcie_setup(struct dw_pcie *pci)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ if (pci->version >= 0x480A || (!pci->version &&
+ dw_pcie_iatu_unroll_enabled(pci))) {
+ pci->iatu_unroll_enabled = true;
+ if (!pci->atu_base)
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+ "enabled" : "disabled");
+
+
ret = of_property_read_u32(np, "num-lanes", &lanes);
if (ret)
lanes = 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 377f4c0b52da..b8993f2b78df 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -41,6 +41,9 @@
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PCIE_PORT_DEBUG1 0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
@@ -145,14 +148,9 @@ struct dw_pcie_host_ops {
int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val);
int (*host_init)(struct pcie_port *pp);
- void (*msi_set_irq)(struct pcie_port *pp, int irq);
- void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
- u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
- void (*msi_irq_ack)(int irq, struct pcie_port *pp);
};
struct pcie_port {
@@ -179,8 +177,11 @@ struct pcie_port {
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
+ struct page *msi_page;
+ struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
+ struct pci_bus *root_bus;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -200,7 +201,7 @@ struct dw_pcie_ep_ops {
struct dw_pcie_ep {
struct pci_epc *epc;
- struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
@@ -222,6 +223,10 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
+ u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
@@ -238,6 +243,7 @@ struct dw_pcie {
struct pcie_port pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
+ unsigned int version;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -252,6 +258,10 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
size_t size);
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
size_t size, u32 val);
+u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size);
+void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
@@ -295,12 +305,12 @@ static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+ __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+ return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4);
}
static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index a7f703556790..0ed235d560e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1129,25 +1129,8 @@ err_deinit:
return ret;
}
-static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- /* the device class is not reported correctly from the register */
- if (where == PCI_CLASS_REVISION && size == 4) {
- *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
- *val &= 0xff; /* keep revision id */
- *val |= PCI_CLASS_BRIDGE_PCI << 16;
- return PCIBIOS_SUCCESSFUL;
- }
-
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.host_init = qcom_pcie_host_init,
- .rd_own_conf = qcom_pcie_rd_own_conf,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1309,6 +1292,12 @@ static const struct of_device_id qcom_pcie_match[] = {
{ }
};
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
.driver = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index d5dc40289cce..3f30ee4a00b3 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -270,6 +270,7 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct device_node *np = pci->dev->of_node;
struct device_node *np_intc;
+ int ret = 0;
np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!np_intc) {
@@ -280,20 +281,24 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
pp->irq = irq_of_parse_and_map(np_intc, 0);
if (!pp->irq) {
dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!priv->legacy_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
pp);
- return 0;
+out_put_node:
+ of_node_put(np_intc);
+ return ret;
}
static int uniphier_pcie_host_init(struct pcie_port *pp)
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index eb58dfdaba1b..134e0306ff00 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -794,6 +794,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
struct irq_chip *irq_chip;
+ int ret = 0;
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
@@ -806,8 +807,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
dev_name(dev));
if (!irq_chip->name) {
- of_node_put(pcie_intc_node);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put_node;
}
irq_chip->irq_mask = advk_pcie_irq_mask;
@@ -819,11 +820,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
&advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put_node;
}
- return 0;
+out_put_node:
+ of_node_put(pcie_intc_node);
+ return ret;
}
static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index dea3ec7592a2..75a2fb930d4b 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Simple, generic PCI host controller driver targetting firmware-initialised
+ * Simple, generic PCI host controller driver targeting firmware-initialised
* systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
*
* Copyright (C) 2014 ARM Limited
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 95441a35eceb..82acd6155adf 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1486,6 +1486,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
}
}
+/*
+ * Remove entries in sysfs pci slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (!hpdev->pci_slot)
+ continue;
+ pci_destroy_slot(hpdev->pci_slot);
+ hpdev->pci_slot = NULL;
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1761,6 +1776,10 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
put_pcichild(hpdev);
}
@@ -1900,6 +1919,9 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
+ /* For the get_pcichild() in hv_pci_eject_device() */
+ put_pcichild(hpdev);
+ /* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
@@ -2677,6 +2699,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_lock_rescan_remove();
pci_stop_root_bus(hbus->pci_bus);
pci_remove_root_bus(hbus->pci_bus);
+ hv_pci_remove_slots(hbus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_removed;
}
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index f4f53d092e00..464ba2538d52 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -231,9 +231,9 @@ struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- unsigned long pages;
struct mutex lock;
- u64 phys;
+ void *virt;
+ dma_addr_t phys;
int irq;
};
@@ -1536,7 +1536,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
dev_err(dev, "failed to get IRQ: %d\n", err);
- goto err;
+ goto free_irq_domain;
}
msi->irq = err;
@@ -1545,17 +1545,35 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(dev, "failed to request IRQ: %d\n", err);
- goto err;
+ goto free_irq_domain;
+ }
+
+ /* Though the PCIe controller can address a >32-bit address space, set
+ * the coherent DMA mask to 32 bits so that the MSI target address is
+ * always a 32-bit address, to accommodate endpoints that support only
+ * 32-bit MSI target addresses.
+ */
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err < 0) {
+ dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
+ goto free_irq;
+ }
+
+ msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!msi->virt) {
+ dev_err(dev, "failed to allocate DMA memory for MSI\n");
+ err = -ENOMEM;
+ goto free_irq;
}
- /* setup AFI/FPCI range */
- msi->pages = __get_free_pages(GFP_KERNEL, 0);
- msi->phys = virt_to_phys((void *)msi->pages);
host->msi = &msi->chip;
return 0;
-err:
+free_irq:
+ free_irq(msi->irq, pcie);
+free_irq_domain:
irq_domain_remove(msi->domain);
return err;
}
@@ -1592,7 +1610,8 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
struct tegra_msi *msi = &pcie->msi;
unsigned int i, irq;
- free_pages(msi->pages, 0);
+ dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
+ DMA_ATTR_NO_KERNEL_MAPPING);
if (msi->irq > 0)
free_irq(msi->irq, pcie);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index cb3401a931f8..0a3f61be5625 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -367,7 +367,7 @@ static void iproc_msi_handler(struct irq_desc *desc)
/*
* Now go read the tail pointer again to see if there are new
- * oustanding events that came in during the above window.
+ * outstanding events that came in during the above window.
*/
} while (true);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index c20fd6bd68fd..e3ca46497470 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -60,6 +60,10 @@
#define APB_ERR_EN_SHIFT 0
#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+#define CFG_RD_SUCCESS 0
+#define CFG_RD_UR 1
+#define CFG_RD_CRS 2
+#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -289,6 +293,9 @@ enum iproc_pcie_reg {
IPROC_PCIE_IARR4,
IPROC_PCIE_IMAP4,
+ /* config read status */
+ IPROC_PCIE_CFG_RD_STATUS,
+
/* link status */
IPROC_PCIE_LINK_STATUS,
@@ -350,6 +357,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
[IPROC_PCIE_IMAP3] = 0xe08,
[IPROC_PCIE_IARR4] = 0xe68,
[IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_CFG_RD_STATUS] = 0xee0,
[IPROC_PCIE_LINK_STATUS] = 0xf0c,
[IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
@@ -474,10 +482,12 @@ static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
-static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
+static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
+ void __iomem *cfg_data_p)
{
int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
unsigned int data;
+ u32 status;
/*
* As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
@@ -498,6 +508,15 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
+ /*
+ * The CRS state is reported in the CFG_RD status register.
+ * Checking it handles the case where CFG_RETRY_STATUS is
+ * itself valid config data.
+ */
+ status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
+ if (status != CFG_RD_CRS)
+ return data;
+
udelay(1);
data = readl(cfg_data_p);
}
@@ -576,7 +595,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
if (!cfg_data_p)
return PCIBIOS_DEVICE_NOT_FOUND;
- data = iproc_pcie_cfg_retry(cfg_data_p);
+ data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
*val = data;
if (size <= 2)
@@ -936,8 +955,25 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
resource_size_t window_size =
ob_map->window_sizes[size_idx] * SZ_1M;
- if (size < window_size)
- continue;
+ /*
+ * Keep iterating until we reach the last window with the
+ * minimal window size at index zero. In that case, take a
+ * compromise and map the region using the minimum window
+ * size that the hardware can support.
+ */
+ if (size < window_size) {
+ if (size_idx > 0 || window_idx > 0)
+ continue;
+
+ /*
+ * For the corner case of reaching the minimal
+ * window size that can be supported on the
+ * last window
+ */
+ axi_addr = ALIGN_DOWN(axi_addr, window_size);
+ pci_addr = ALIGN_DOWN(pci_addr, window_size);
+ size = window_size;
+ }
if (!IS_ALIGNED(axi_addr, window_size) ||
!IS_ALIGNED(pci_addr, window_size)) {
@@ -1146,11 +1182,43 @@ err_ib:
return ret;
}
+static int iproc_pcie_add_dma_range(struct device *dev,
+ struct list_head *resources,
+ struct of_pci_range *range)
+{
+ struct resource *res;
+ struct resource_entry *entry, *tmp;
+ struct list_head *head = resources;
+
+ res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ resource_list_for_each_entry(tmp, resources) {
+ if (tmp->res->start < range->cpu_addr)
+ head = &tmp->node;
+ }
+
+ res->start = range->cpu_addr;
+ res->end = res->start + range->size - 1;
+
+ entry = resource_list_create_entry(res, 0);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->offset = res->start - range->cpu_addr;
+ resource_list_add(entry, head);
+
+ return 0;
+}
+
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct of_pci_range range;
struct of_pci_range_parser parser;
int ret;
+ LIST_HEAD(resources);
/* Get the dma-ranges from DT */
ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
@@ -1158,13 +1226,23 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
return ret;
for_each_of_pci_range(&parser, &range) {
+ ret = iproc_pcie_add_dma_range(pcie->dev,
+ &resources,
+ &range);
+ if (ret)
+ goto out;
/* Each range entry corresponds to an inbound mapping region */
ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
if (ret)
- return ret;
+ goto out;
}
+ list_splice_init(&resources, &host->dma_ranges);
+
return 0;
+out:
+ pci_free_resource_list(&resources);
+ return ret;
}
static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
@@ -1320,14 +1398,18 @@ static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
if (pcie->need_msi_steer) {
ret = iproc_pcie_msi_steer(pcie, msi_node);
if (ret)
- return ret;
+ goto out_put_node;
}
/*
* If another MSI controller is being used, the call below should fail
* but that is okay
*/
- return iproc_msi_init(pcie, msi_node);
+ ret = iproc_msi_init(pcie, msi_node);
+
+out_put_node:
+ of_node_put(msi_node);
+ return ret;
}
static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
@@ -1347,7 +1429,6 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB:
regs = iproc_pcie_reg_paxb;
- pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_ob_map;
@@ -1356,6 +1437,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB_V2:
regs = iproc_pcie_reg_paxb_v2;
+ pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_v2_ob_map;
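The ALIGN_DOWN() fallback added to iproc_pcie_setup_ob() earlier in this file only triggers for the smallest supported window. A worked example with made-up numbers, formatted as a comment:

/*
 * Illustrative only: suppose the smallest hardware window is SZ_128M
 * (0x08000000) and the requested axi_addr/pci_addr is 0x4a000000.
 *
 *   ALIGN_DOWN(0x4a000000, SZ_128M) == 0x48000000
 *
 * Both addresses are rounded down to 0x48000000 and the mapping is then
 * programmed with the 128 MiB minimum window size instead of the smaller,
 * unaligned size that was requested.
 */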
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 0b6c72804e03..80601e1b939e 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -578,6 +578,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops, port);
+ of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
return -ENODEV;
@@ -915,49 +916,29 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
/* sys_ck might be divided into the following parts in some chips */
snprintf(name, sizeof(name), "ahb_ck%d", slot);
- port->ahb_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->ahb_ck)) {
- if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->ahb_ck = NULL;
- }
+ port->ahb_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->ahb_ck))
+ return PTR_ERR(port->ahb_ck);
snprintf(name, sizeof(name), "axi_ck%d", slot);
- port->axi_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->axi_ck)) {
- if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->axi_ck = NULL;
- }
+ port->axi_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->axi_ck))
+ return PTR_ERR(port->axi_ck);
snprintf(name, sizeof(name), "aux_ck%d", slot);
- port->aux_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->aux_ck)) {
- if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->aux_ck = NULL;
- }
+ port->aux_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->aux_ck))
+ return PTR_ERR(port->aux_ck);
snprintf(name, sizeof(name), "obff_ck%d", slot);
- port->obff_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->obff_ck)) {
- if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->obff_ck = NULL;
- }
+ port->obff_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->obff_ck))
+ return PTR_ERR(port->obff_ck);
snprintf(name, sizeof(name), "pipe_ck%d", slot);
- port->pipe_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->pipe_ck)) {
- if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->pipe_ck = NULL;
- }
+ port->pipe_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->pipe_ck))
+ return PTR_ERR(port->pipe_ck);
snprintf(name, sizeof(name), "pcie-rst%d", slot);
port->reset = devm_reset_control_get_optional_exclusive(dev, name);
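The conversion above works because devm_clk_get_optional() returns NULL, not an error, when the clock is simply absent from DT, while still propagating real errors such as -EPROBE_DEFER. A hedged sketch of the resulting pattern, with a hypothetical clock name and helper:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_get_optional_clock(struct platform_device *pdev,
				      struct clk **out)
{
	/* NULL if the clock is absent; ERR_PTR() only on real errors. */
	struct clk *clk = devm_clk_get_optional(&pdev->dev, "ahb_ck0");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* includes -EPROBE_DEFER */

	*out = clk;			/* clk_prepare_enable(NULL) is a no-op */
	return 0;
}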
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index c8febb009454..f6a669a9af41 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -46,14 +46,15 @@
/* Transfer control */
#define PCIETCTLR 0x02000
-#define CFINIT 1
+#define DL_DOWN BIT(3)
+#define CFINIT BIT(0)
#define PCIETSTR 0x02004
-#define DATA_LINK_ACTIVE 1
+#define DATA_LINK_ACTIVE BIT(0)
#define PCIEERRFR 0x02020
#define UNSUPPORTED_REQUEST BIT(4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
-#define MSIFE 1
+#define MSIFE BIT(0)
#define PCIEMSIAUR 0x0204c
#define PCIEMSIIER 0x02050
@@ -94,6 +95,7 @@
#define MACCTLR 0x011058
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN BIT(31)
@@ -152,14 +154,13 @@ struct rcar_pcie {
struct rcar_msi msi;
};
-static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
- unsigned long reg)
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val,
+ unsigned int reg)
{
writel(val, pcie->base + reg);
}
-static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
- unsigned long reg)
+static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
{
return readl(pcie->base + reg);
}
@@ -171,7 +172,7 @@ enum {
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
- int shift = 8 * (where & 3);
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
u32 val = rcar_pci_read_reg(pcie, where & ~3);
val &= ~(mask << shift);
@@ -181,7 +182,7 @@ static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
- int shift = 8 * (where & 3);
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
u32 val = rcar_pci_read_reg(pcie, where & ~3);
return val >> shift;
@@ -192,7 +193,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
unsigned char access_type, struct pci_bus *bus,
unsigned int devfn, int where, u32 *data)
{
- int dev, func, reg, index;
+ unsigned int dev, func, reg, index;
dev = PCI_SLOT(devfn);
func = PCI_FUNC(devfn);
@@ -281,12 +282,12 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
}
if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
+ *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
else if (size == 2)
- *val = (*val >> (8 * (where & 2))) & 0xffff;
+ *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
- dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
- bus->number, devfn, where, size, (unsigned long)*val);
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, *val);
return ret;
}
@@ -296,23 +297,24 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct rcar_pcie *pcie = bus->sysdata;
- int shift, ret;
+ unsigned int shift;
u32 data;
+ int ret;
ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
bus, devfn, where, &data);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
- dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
- bus->number, devfn, where, size, (unsigned long)val);
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, val);
if (size == 1) {
- shift = 8 * (where & 3);
+ shift = BITS_PER_BYTE * (where & 3);
data &= ~(0xff << shift);
data |= ((val & 0xff) << shift);
} else if (size == 2) {
- shift = 8 * (where & 2);
+ shift = BITS_PER_BYTE * (where & 2);
data &= ~(0xffff << shift);
data |= ((val & 0xffff) << shift);
} else
@@ -507,10 +509,10 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
}
static void phy_write_reg(struct rcar_pcie *pcie,
- unsigned int rate, unsigned int addr,
- unsigned int lane, unsigned int data)
+ unsigned int rate, u32 addr,
+ unsigned int lane, u32 data)
{
- unsigned long phyaddr;
+ u32 phyaddr;
phyaddr = WRITE_CMD |
((rate & 1) << RATE_POS) |
@@ -738,15 +740,15 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
while (reg) {
unsigned int index = find_first_bit(&reg, 32);
- unsigned int irq;
+ unsigned int msi_irq;
/* clear the interrupt */
rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
- irq = irq_find_mapping(msi->domain, index);
- if (irq) {
+ msi_irq = irq_find_mapping(msi->domain, index);
+ if (msi_irq) {
if (test_bit(index, msi->used))
- generic_handle_irq(irq);
+ generic_handle_irq(msi_irq);
else
dev_info(dev, "unhandled MSI\n");
} else {
@@ -890,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
- unsigned long base;
+ phys_addr_t base;
int err, i;
mutex_init(&msi->lock);
@@ -929,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
/* setup MSI data target */
msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ if (!msi->pages) {
+ err = -ENOMEM;
+ goto err;
+ }
base = virt_to_phys((void *)msi->pages);
- rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
+ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
/* enable all MSI interrupts */
rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
@@ -1118,7 +1124,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
- unsigned int data;
+ u32 data;
int err;
int (*phy_init_fn)(struct rcar_pcie *);
struct pci_host_bridge *bridge;
@@ -1130,6 +1136,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
if (err)
@@ -1221,10 +1228,28 @@ err_free_bridge:
return err;
}
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+ struct rcar_pcie *pcie = dev_get_drvdata(dev);
+
+ if (rcar_pci_read_reg(pcie, PMSR) &&
+ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+ return 0;
+
+ /* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+ return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+ .resume_noirq = rcar_pcie_resume_noirq,
+};
+
static struct platform_driver rcar_pcie_driver = {
.driver = {
.name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
+ .pm = &rcar_pcie_pm_ops,
.suppress_bind_attrs = true,
},
.probe = rcar_pcie_probe,
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index a5d799e2dff2..d743b0a48988 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -350,7 +350,7 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
struct rockchip_pcie *rockchip = &ep->rockchip;
u32 r = ep->max_regions - 1;
u32 offset;
- u16 status;
+ u32 status;
u8 msg_code;
if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 1372d270764f..8d20f1793a61 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -724,6 +724,7 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
&intx_domain_ops, rockchip);
+ of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
return -EINVAL;
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 81538d77f790..3b031f00a94a 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -438,11 +438,10 @@ static const struct irq_domain_ops legacy_domain_ops = {
#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
.name = "nwl_pcie:msi",
- .irq_enable = unmask_msi_irq,
- .irq_disable = mask_msi_irq,
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
-
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
static struct msi_domain_info nwl_msi_domain_info = {
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 9bd1a35cd5d8..5bf3af3b28e6 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = {
* xilinx_pcie_enable_msi - Enable MSI support
* @port: PCIe port information
*/
-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
{
phys_addr_t msg_addr;
port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ if (!port->msi_pages)
+ return -ENOMEM;
+
msg_addr = virt_to_phys((void *)port->msi_pages);
pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+
+ return 0;
}
/* INTx Functions */
@@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- xilinx_pcie_enable_msi(port);
+ ret = xilinx_pcie_enable_msi(port);
+ if (ret)
+ return ret;
}
return 0;
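
Editorial aside: both this hunk and the earlier R-Car one follow the same shape, an MSI setup helper that used to return void now checks its page allocation and propagates -ENOMEM to the caller. A hedged userspace analogue of that error-propagation pattern (calloc stands in for __get_free_pages; the names are illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int enable_msi(void **pages)
    {
        *pages = calloc(1, 4096);   /* one page for the MSI target */
        if (!*pages)
            return -ENOMEM;         /* report the failure instead of ignoring it */
        return 0;
    }

    static int init_irq_domain(void)
    {
        void *pages;
        int ret = enable_msi(&pages);

        if (ret)                    /* the caller now unwinds on failure */
            return ret;
        free(pages);
        return 0;
    }

    int main(void)
    {
        printf("init_irq_domain() = %d\n", init_irq_domain());
        return 0;
    }
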
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d0b91da49bf4..27806987e93b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -438,7 +438,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epc_features = epf_test->epc_features;
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
- test_reg_bar);
+ test_reg_bar, epc_features->align);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -453,7 +453,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- base = pci_epf_alloc_space(epf, bar_size[bar], bar);
+ base = pci_epf_alloc_space(epf, bar_size[bar], bar,
+ epc_features->align);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -591,6 +592,11 @@ static int __init pci_epf_test_init(void)
kpcitest_workqueue = alloc_workqueue("kpcitest",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!kpcitest_workqueue) {
+ pr_err("Failed to allocate the kpcitest work queue\n");
+ return -ENOMEM;
+ }
+
ret = pci_epf_register_driver(&test_driver);
if (ret) {
pr_err("Failed to register pci epf test driver --> %d\n", ret);
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8bfdcd291196..fb1306de8f40 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -109,10 +109,12 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* pci_epf_alloc_space() - allocate memory for the PCI EPF register space
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
+ * @align: alignment size for the allocation region
*
* Invoke to allocate memory for the PCI EPF register space.
*/
-void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align)
{
void *space;
struct device *dev = epf->epc->dev.parent;
@@ -120,7 +122,11 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
if (size < 128)
size = 128;
- size = roundup_pow_of_two(size);
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
if (!space) {
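
Editorial aside: with the new align argument, pci_epf_alloc_space() only rounds up to the next power of two when no explicit BAR alignment is given. A small standalone comparison of the two rounding strategies (the helpers below are userspace re-implementations, and the sizes are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* ALIGN() assumes 'a' is a power of two, as BAR alignments are. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    static uint64_t roundup_pow_of_two(uint64_t x)
    {
        uint64_t r = 1;

        while (r < x)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        uint64_t size = 300, align = 128;

        /* 384 with an explicit 128-byte alignment ... */
        printf("ALIGN(%llu, %llu) = %llu\n", (unsigned long long)size,
               (unsigned long long)align, (unsigned long long)ALIGN(size, align));
        /* ... versus 512 with the old power-of-two rounding. */
        printf("roundup_pow_of_two(%llu) = %llu\n", (unsigned long long)size,
               (unsigned long long)roundup_pow_of_two(size));
        return 0;
    }
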
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 506e1d923a1f..8c51a04b8083 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -25,36 +25,21 @@
#include "../pcie/portdrv.h"
-#define MY_NAME "pciehp"
-
extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
-extern bool pciehp_debug;
-
-#define dbg(format, arg...) \
-do { \
- if (pciehp_debug) \
- printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \
-} while (0)
-#define err(format, arg...) \
- printk(KERN_ERR "%s: " format, MY_NAME, ## arg)
-#define info(format, arg...) \
- printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
-#define warn(format, arg...) \
- printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
+/*
+ * Set CONFIG_DYNAMIC_DEBUG=y and boot with 'dyndbg="file pciehp* +p"' to
+ * enable debug messages.
+ */
#define ctrl_dbg(ctrl, format, arg...) \
- do { \
- if (pciehp_debug) \
- dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
- format, ## arg); \
- } while (0)
+ pci_dbg(ctrl->pcie->port, format, ## arg)
#define ctrl_err(ctrl, format, arg...) \
- dev_err(&ctrl->pcie->device, format, ## arg)
+ pci_err(ctrl->pcie->port, format, ## arg)
#define ctrl_info(ctrl, format, arg...) \
- dev_info(&ctrl->pcie->device, format, ## arg)
+ pci_info(ctrl->pcie->port, format, ## arg)
#define ctrl_warn(ctrl, format, arg...) \
- dev_warn(&ctrl->pcie->device, format, ## arg)
+ pci_warn(ctrl->pcie->port, format, ## arg)
#define SLOT_NAME_SIZE 10
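
Editorial aside: the pciehp logging macros are reduced to thin wrappers around the pci_*() helpers, so the printk level and device prefix come from one place. The delegating-macro pattern itself can be sketched in plain C; pci_info/ctrl_info here are local stand-ins, not the kernel definitions, and ##__VA_ARGS__ relies on the same GNU extension the kernel uses.

    #include <stdio.h>

    struct pci_dev    { const char *name; };
    struct controller { struct pci_dev *port; };

    /* Thin delegation: ctrl_info() only adds which device to log against. */
    #define pci_info(dev, fmt, ...)   printf("%s: " fmt, (dev)->name, ##__VA_ARGS__)
    #define ctrl_info(ctrl, fmt, ...) pci_info((ctrl)->port, fmt, ##__VA_ARGS__)

    int main(void)
    {
        struct pci_dev port = { "0000:00:1c.0" };
        struct controller ctrl = { &port };

        ctrl_info(&ctrl, "Slot Status : 0x%04x\n", 0x40);
        return 0;
    }
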
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fc5366b50e95..6ad0d86762cb 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -17,6 +17,9 @@
* Dely Sy <dely.l.sy@intel.com>"
*/
+#define pr_fmt(fmt) "pciehp: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,7 +30,6 @@
#include "../pci.h"
/* Global variables */
-bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
@@ -35,15 +37,11 @@ int pciehp_poll_time;
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
*/
-module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
-MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-#define PCIE_MODULE_NAME "pciehp"
-
static int set_attention_status(struct hotplug_slot *slot, u8 value);
static int get_power_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
@@ -182,14 +180,14 @@ static int pciehp_probe(struct pcie_device *dev)
if (!dev->port->subordinate) {
/* Can happen if we run out of bus numbers during probe */
- dev_err(&dev->device,
+ pci_err(dev->port,
"Hotplug bridge without secondary bus, ignoring\n");
return -ENODEV;
}
ctrl = pcie_init(dev);
if (!ctrl) {
- dev_err(&dev->device, "Controller initialization failed\n");
+ pci_err(dev->port, "Controller initialization failed\n");
return -ENODEV;
}
set_service_data(dev, ctrl);
@@ -307,7 +305,7 @@ static int pciehp_runtime_resume(struct pcie_device *dev)
#endif /* PM */
static struct pcie_port_service_driver hpdriver_portdrv = {
- .name = PCIE_MODULE_NAME,
+ .name = "pciehp",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_HP,
@@ -328,9 +326,9 @@ int __init pcie_hp_init(void)
int retval = 0;
retval = pcie_port_service_register(&hpdriver_portdrv);
- dbg("pcie_port_service_register = %d\n", retval);
+ pr_debug("pcie_port_service_register = %d\n", retval);
if (retval)
- dbg("Failure to register service\n");
+ pr_debug("Failure to register service\n");
return retval;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 905282a8ddaa..631ced0ab28a 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -13,6 +13,8 @@
*
*/
+#define dev_fmt(fmt) "pciehp: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 6a2365cd794e..bd990e3371e3 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -12,6 +12,8 @@
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*/
+#define dev_fmt(fmt) "pciehp: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
@@ -46,7 +48,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
/* Installs the interrupt handler */
retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
- IRQF_SHARED, MY_NAME, ctrl);
+ IRQF_SHARED, "pciehp", ctrl);
if (retval)
ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
irq);
@@ -232,8 +234,8 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
delay -= step;
} while (delay > 0);
- if (count > 1 && pciehp_debug)
- printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
+ if (count > 1)
+ pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), count, step, l);
@@ -822,14 +824,11 @@ static inline void dbg_ctrl(struct controller *ctrl)
struct pci_dev *pdev = ctrl->pcie->port;
u16 reg16;
- if (!pciehp_debug)
- return;
-
- ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
+ ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
- ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
+ ctrl_dbg(ctrl, "Slot Status : 0x%04x\n", reg16);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
- ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
+ ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16);
}
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index b9c1396db6fe..d17f3bf36f70 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -13,6 +13,8 @@
*
*/
+#define dev_fmt(fmt) "pciehp: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index e2356a9c7088..182f9e3443ee 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
if (rc == 0)
break;
}
+ of_node_put(parent);
return dn;
}
@@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
return np;
}
+/* Returns a device_node with its reference count incremented */
static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
{
struct device_node *dn;
@@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name)
rc = dlpar_add_phb(drc_name, dn);
break;
}
+ of_node_put(dn);
printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
exit:
@@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name)
rc = dlpar_remove_pci_slot(drc_name, dn);
break;
}
+ of_node_put(dn);
vm_unmap_aliases();
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 5282aa3e33c5..93b4a945c55d 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -21,6 +21,7 @@
/* free up the memory used by a slot */
void dealloc_slot_struct(struct slot *slot)
{
+ of_node_put(slot->dn);
kfree(slot->name);
kfree(slot);
}
@@ -36,7 +37,7 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
goto error_slot;
- slot->dn = dn;
+ slot->dn = of_node_get(dn);
slot->index = drc_index;
slot->power_domain = power_domain;
slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops;
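
Editorial aside: the slot now takes its own reference on the device_node with of_node_get() and drops it in dealloc_slot_struct(), so the node cannot go away while the slot still points at it. A toy refcount analogue of that get/put pairing (the structures are invented for illustration):

    #include <stdio.h>

    struct node { int refcount; };

    static struct node *node_get(struct node *n) { if (n) n->refcount++; return n; }
    static void node_put(struct node *n)         { if (n) n->refcount--; }

    struct slot { struct node *dn; };

    int main(void)
    {
        struct node dev_node = { .refcount = 1 };
        struct slot slot;

        slot.dn = node_get(&dev_node);  /* alloc path: take a reference */
        node_put(slot.dn);              /* dealloc path: drop it again */

        printf("refcount back to %d\n", dev_node.refcount);
        return 0;
    }
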
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 73986825d221..e039b740fe74 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1338,7 +1338,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
struct msi_desc *desc)
{
return (irq_hw_number_t)desc->msi_attrib.entry_nr |
- PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+ pci_dev_id(dev) << 11 |
(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}
@@ -1508,7 +1508,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
struct device_node *of_node;
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
@@ -1531,7 +1531,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
struct irq_domain *dom;
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
dom = of_msi_map_get_device_domain(&pdev->dev, rid);
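
Editorial aside: pci_dev_id() is just the bus/devfn pair packed into the 16-bit requester ID, which is why it can replace the open-coded PCI_DEVID(dev->bus->number, dev->devfn) in these hunks. A standalone sketch of that packing (the helper name and sample address are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Requester ID layout: bus[15:8] | device[7:3] | function[2:0]. */
    static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
    {
        return (uint16_t)((bus << 8) | devfn);
    }

    int main(void)
    {
        uint8_t bus = 0x3a;
        uint8_t devfn = (0x1f << 3) | 0x6;      /* device 1f, function 6 */

        printf("requester ID = 0x%04x\n", pci_devid(bus, devfn));  /* 0x3afe */
        return 0;
    }
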
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 3d32da15c215..73d5adec0a28 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -15,6 +15,7 @@
#include <linux/of_pci.h>
#include "pci.h"
+#ifdef CONFIG_PCI
void pci_set_of_node(struct pci_dev *dev)
{
if (!dev->bus->dev.of_node)
@@ -31,10 +32,16 @@ void pci_release_of_node(struct pci_dev *dev)
void pci_set_bus_of_node(struct pci_bus *bus)
{
- if (bus->self == NULL)
- bus->dev.of_node = pcibios_get_phb_of_node(bus);
- else
- bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+ struct device_node *node;
+
+ if (bus->self == NULL) {
+ node = pcibios_get_phb_of_node(bus);
+ } else {
+ node = of_node_get(bus->self->dev.of_node);
+ if (node && of_property_read_bool(node, "external-facing"))
+ bus->self->untrusted = true;
+ }
+ bus->dev.of_node = node;
}
void pci_release_bus_of_node(struct pci_bus *bus)
@@ -197,27 +204,6 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
- * This function will try to find the limitation of link speed by finding
- * a property called "max-link-speed" of the given device node.
- *
- * @node: device tree node with the max link speed information
- *
- * Returns the associated max link speed from DT, or a negative value if the
- * required property is not found or is invalid.
- */
-int of_pci_get_max_link_speed(struct device_node *node)
-{
- u32 max_link_speed;
-
- if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
- max_link_speed > 4)
- return -EINVAL;
-
- return max_link_speed;
-}
-EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
-
-/**
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
* is present and valid
*/
@@ -537,3 +523,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
return err;
}
+#endif /* CONFIG_PCI */
+
+/**
+ * This function will try to find the limitation of link speed by finding
+ * a property called "max-link-speed" of the given device node.
+ *
+ * @node: device tree node with the max link speed information
+ *
+ * Returns the associated max link speed from DT, or a negative value if the
+ * required property is not found or is invalid.
+ */
+int of_pci_get_max_link_speed(struct device_node *node)
+{
+ u32 max_link_speed;
+
+ if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
+ max_link_speed > 4)
+ return -EINVAL;
+
+ return max_link_speed;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index c52298d76e64..742928d0053e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -275,6 +275,30 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
}
/*
+ * If we can't find a common upstream bridge, take a look at the root
+ * complex and compare it to a whitelist of known good hardware.
+ */
+static bool root_complex_whitelist(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+ struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
+ unsigned short vendor, device;
+
+ if (!root)
+ return false;
+
+ vendor = root->vendor;
+ device = root->device;
+ pci_dev_put(root);
+
+ /* AMD ZEN host bridges can do peer to peer */
+ if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450)
+ return true;
+
+ return false;
+}
+
+/*
* Find the distance through the nearest common upstream bridge between
* two PCI devices.
*
@@ -317,13 +341,13 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
* In this case, a list of all infringing bridge addresses will be
* populated in acs_list (assuming it's non-null) for printk purposes.
*/
-static int upstream_bridge_distance(struct pci_dev *a,
- struct pci_dev *b,
+static int upstream_bridge_distance(struct pci_dev *provider,
+ struct pci_dev *client,
struct seq_buf *acs_list)
{
+ struct pci_dev *a = provider, *b = client, *bb;
int dist_a = 0;
int dist_b = 0;
- struct pci_dev *bb = NULL;
int acs_cnt = 0;
/*
@@ -354,6 +378,14 @@ static int upstream_bridge_distance(struct pci_dev *a,
dist_a++;
}
+ /*
+ * Allow the connection if both devices are on a whitelisted root
+ * complex, but add an arbitrarily large value to the distance.
+ */
+ if (root_complex_whitelist(provider) &&
+ root_complex_whitelist(client))
+ return 0x1000 + dist_a + dist_b;
+
return -1;
check_b_path_acs:
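
Editorial aside: the whitelist fallback keeps P2P DMA usable on known-good root complexes while still ranking it far behind any path through a common upstream bridge, because the returned distance is padded by 0x1000. A minimal sketch of that ranking decision (the padding value comes from the hunk above, the rest is illustrative):

    #include <stdio.h>

    static int p2p_distance(int dist_a, int dist_b, int both_whitelisted)
    {
        if (both_whitelisted)
            return 0x1000 + dist_a + dist_b;  /* allowed, but heavily penalised */
        return -1;                            /* no common bridge and not whitelisted */
    }

    int main(void)
    {
        printf("distance = %d\n", p2p_distance(2, 3, 1));  /* 4101 */
        return 0;
    }
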
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e1949f7efd9c..c5e1a097d7e3 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -119,7 +119,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
}
static acpi_status decode_type0_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type0 *hpx0)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -132,16 +132,14 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
for (i = 2; i < 6; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t0 = &hpx->type0_data;
- hpx->t0->revision = revision;
- hpx->t0->cache_line_size = fields[2].integer.value;
- hpx->t0->latency_timer = fields[3].integer.value;
- hpx->t0->enable_serr = fields[4].integer.value;
- hpx->t0->enable_perr = fields[5].integer.value;
+ hpx0->revision = revision;
+ hpx0->cache_line_size = fields[2].integer.value;
+ hpx0->latency_timer = fields[3].integer.value;
+ hpx0->enable_serr = fields[4].integer.value;
+ hpx0->enable_perr = fields[5].integer.value;
break;
default:
- printk(KERN_WARNING
- "%s: Type 0 Revision %d record not supported\n",
+ pr_warn("%s: Type 0 Revision %d record not supported\n",
__func__, revision);
return AE_ERROR;
}
@@ -149,7 +147,7 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
}
static acpi_status decode_type1_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type1 *hpx1)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -162,15 +160,13 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
for (i = 2; i < 5; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t1 = &hpx->type1_data;
- hpx->t1->revision = revision;
- hpx->t1->max_mem_read = fields[2].integer.value;
- hpx->t1->avg_max_split = fields[3].integer.value;
- hpx->t1->tot_max_split = fields[4].integer.value;
+ hpx1->revision = revision;
+ hpx1->max_mem_read = fields[2].integer.value;
+ hpx1->avg_max_split = fields[3].integer.value;
+ hpx1->tot_max_split = fields[4].integer.value;
break;
default:
- printk(KERN_WARNING
- "%s: Type 1 Revision %d record not supported\n",
+ pr_warn("%s: Type 1 Revision %d record not supported\n",
__func__, revision);
return AE_ERROR;
}
@@ -178,7 +174,7 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
}
static acpi_status decode_type2_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type2 *hpx2)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -191,45 +187,102 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record,
for (i = 2; i < 18; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t2 = &hpx->type2_data;
- hpx->t2->revision = revision;
- hpx->t2->unc_err_mask_and = fields[2].integer.value;
- hpx->t2->unc_err_mask_or = fields[3].integer.value;
- hpx->t2->unc_err_sever_and = fields[4].integer.value;
- hpx->t2->unc_err_sever_or = fields[5].integer.value;
- hpx->t2->cor_err_mask_and = fields[6].integer.value;
- hpx->t2->cor_err_mask_or = fields[7].integer.value;
- hpx->t2->adv_err_cap_and = fields[8].integer.value;
- hpx->t2->adv_err_cap_or = fields[9].integer.value;
- hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
- hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
- hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
- hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
- hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
- hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
- hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
- hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
+ hpx2->revision = revision;
+ hpx2->unc_err_mask_and = fields[2].integer.value;
+ hpx2->unc_err_mask_or = fields[3].integer.value;
+ hpx2->unc_err_sever_and = fields[4].integer.value;
+ hpx2->unc_err_sever_or = fields[5].integer.value;
+ hpx2->cor_err_mask_and = fields[6].integer.value;
+ hpx2->cor_err_mask_or = fields[7].integer.value;
+ hpx2->adv_err_cap_and = fields[8].integer.value;
+ hpx2->adv_err_cap_or = fields[9].integer.value;
+ hpx2->pci_exp_devctl_and = fields[10].integer.value;
+ hpx2->pci_exp_devctl_or = fields[11].integer.value;
+ hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
+ hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
+ hpx2->sec_unc_err_sever_and = fields[14].integer.value;
+ hpx2->sec_unc_err_sever_or = fields[15].integer.value;
+ hpx2->sec_unc_err_mask_and = fields[16].integer.value;
+ hpx2->sec_unc_err_mask_or = fields[17].integer.value;
break;
default:
- printk(KERN_WARNING
- "%s: Type 2 Revision %d record not supported\n",
+ pr_warn("%s: Type 2 Revision %d record not supported\n",
__func__, revision);
return AE_ERROR;
}
return AE_OK;
}
-static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
+static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
+ union acpi_object *reg_fields)
+{
+ hpx3_reg->device_type = reg_fields[0].integer.value;
+ hpx3_reg->function_type = reg_fields[1].integer.value;
+ hpx3_reg->config_space_location = reg_fields[2].integer.value;
+ hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
+ hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
+ hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
+ hpx3_reg->dvsec_id = reg_fields[6].integer.value;
+ hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
+ hpx3_reg->match_offset = reg_fields[8].integer.value;
+ hpx3_reg->match_mask_and = reg_fields[9].integer.value;
+ hpx3_reg->match_value = reg_fields[10].integer.value;
+ hpx3_reg->reg_offset = reg_fields[11].integer.value;
+ hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
+ hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
+}
+
+static acpi_status program_type3_hpx_record(struct pci_dev *dev,
+ union acpi_object *record,
+ const struct hotplug_program_ops *hp_ops)
+{
+ union acpi_object *fields = record->package.elements;
+ u32 desc_count, expected_length, revision;
+ union acpi_object *reg_fields;
+ struct hpx_type3 hpx3;
+ int i;
+
+ revision = fields[1].integer.value;
+ switch (revision) {
+ case 1:
+ desc_count = fields[2].integer.value;
+ expected_length = 3 + desc_count * 14;
+
+ if (record->package.count != expected_length)
+ return AE_ERROR;
+
+ for (i = 2; i < expected_length; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+
+ for (i = 0; i < desc_count; i++) {
+ reg_fields = fields + 3 + i * 14;
+ parse_hpx3_register(&hpx3, reg_fields);
+ hp_ops->program_type3(dev, &hpx3);
+ }
+
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 3 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package, *record, *fields;
+ struct hpp_type0 hpx0;
+ struct hpp_type1 hpx1;
+ struct hpp_type2 hpx2;
u32 type;
int i;
- /* Clear the return buffer with zeros */
- memset(hpx, 0, sizeof(struct hotplug_params));
-
status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
@@ -257,22 +310,33 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
type = fields[0].integer.value;
switch (type) {
case 0:
- status = decode_type0_hpx_record(record, hpx);
+ memset(&hpx0, 0, sizeof(hpx0));
+ status = decode_type0_hpx_record(record, &hpx0);
if (ACPI_FAILURE(status))
goto exit;
+ hp_ops->program_type0(dev, &hpx0);
break;
case 1:
- status = decode_type1_hpx_record(record, hpx);
+ memset(&hpx1, 0, sizeof(hpx1));
+ status = decode_type1_hpx_record(record, &hpx1);
if (ACPI_FAILURE(status))
goto exit;
+ hp_ops->program_type1(dev, &hpx1);
break;
case 2:
- status = decode_type2_hpx_record(record, hpx);
+ memset(&hpx2, 0, sizeof(hpx2));
+ status = decode_type2_hpx_record(record, &hpx2);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ hp_ops->program_type2(dev, &hpx2);
+ break;
+ case 3:
+ status = program_type3_hpx_record(dev, record, hp_ops);
if (ACPI_FAILURE(status))
goto exit;
break;
default:
- printk(KERN_ERR "%s: Type %d record not supported\n",
+ pr_err("%s: Type %d record not supported\n",
__func__, type);
status = AE_ERROR;
goto exit;
@@ -283,14 +347,16 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
return status;
}
-static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
+static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package, *fields;
+ struct hpp_type0 hpp0;
int i;
- memset(hpp, 0, sizeof(struct hotplug_params));
+ memset(&hpp0, 0, sizeof(hpp0));
status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -311,12 +377,13 @@ static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
}
}
- hpp->t0 = &hpp->type0_data;
- hpp->t0->revision = 1;
- hpp->t0->cache_line_size = fields[0].integer.value;
- hpp->t0->latency_timer = fields[1].integer.value;
- hpp->t0->enable_serr = fields[2].integer.value;
- hpp->t0->enable_perr = fields[3].integer.value;
+ hpp0.revision = 1;
+ hpp0.cache_line_size = fields[0].integer.value;
+ hpp0.latency_timer = fields[1].integer.value;
+ hpp0.enable_serr = fields[2].integer.value;
+ hpp0.enable_perr = fields[3].integer.value;
+
+ hp_ops->program_type0(dev, &hpp0);
exit:
kfree(buffer.pointer);
@@ -328,7 +395,8 @@ exit:
* @dev - the pci_dev for which we want parameters
* @hpp - allocated by the caller
*/
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
acpi_handle handle, phandle;
@@ -351,10 +419,10 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
* this pci dev.
*/
while (handle) {
- status = acpi_run_hpx(handle, hpp);
+ status = acpi_run_hpx(dev, handle, hp_ops);
if (ACPI_SUCCESS(status))
return 0;
- status = acpi_run_hpp(handle, hpp);
+ status = acpi_run_hpp(dev, handle, hp_ops);
if (ACPI_SUCCESS(status))
return 0;
if (acpi_is_root_bridge(handle))
@@ -366,7 +434,6 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
}
return -ENODEV;
}
-EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* pciehp_is_native - Check whether a hotplug port is handled by the OS
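
Editorial aside: the new Type 3 decoder validates the _HPX package length before touching any field, three header integers plus fourteen integers per register descriptor. That length check can be expressed on its own (a sketch, not the kernel code):

    #include <stdio.h>

    /* 3 header fields, then 14 integers per HPX Type 3 register descriptor. */
    static unsigned int hpx3_expected_length(unsigned int desc_count)
    {
        return 3 + desc_count * 14;
    }

    int main(void)
    {
        unsigned int desc_count = 2;

        printf("expected package length for %u descriptors: %u\n",
               desc_count, hpx3_expected_length(desc_count));
        return 0;
    }
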
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 66f8a59fadbd..e408099fea52 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -66,20 +66,18 @@ static int __init pci_stub_init(void)
&class, &class_mask);
if (fields < 2) {
- printk(KERN_WARNING
- "pci-stub: invalid id string \"%s\"\n", id);
+ pr_warn("pci-stub: invalid ID string \"%s\"\n", id);
continue;
}
- printk(KERN_INFO
- "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
+ pr_info("pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
vendor, device, subvendor, subdevice, class, class_mask);
rc = pci_add_dynid(&stub_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- printk(KERN_WARNING
- "pci-stub: failed to add dynamic id (%d)\n", rc);
+ pr_warn("pci-stub: failed to add dynamic ID (%d)\n",
+ rc);
}
return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 25794c27c7a4..6d27475e39b2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1111,8 +1111,7 @@ legacy_io_err:
kfree(b->legacy_io);
b->legacy_io = NULL;
kzalloc_err:
- printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
- return;
+ dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}
void pci_remove_legacy_files(struct pci_bus *b)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 766f5779db92..8abc843b1615 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -197,8 +197,8 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
/**
* pci_dev_str_match_path - test if a path string matches a device
- * @dev: the PCI device to test
- * @path: string to match the device against
+ * @dev: the PCI device to test
+ * @path: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) formatted as a
@@ -280,8 +280,8 @@ free_and_exit:
/**
* pci_dev_str_match - test if a string matches a device
- * @dev: the PCI device to test
- * @p: string to match the device against
+ * @dev: the PCI device to test
+ * @p: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) matches a specified
@@ -341,7 +341,7 @@ static int pci_dev_str_match(struct pci_dev *dev, const char *p,
} else {
/*
* PCI Bus, Device, Function IDs are specified
- * (optionally, may include a path of devfns following it)
+ * (optionally, may include a path of devfns following it)
*/
ret = pci_dev_str_match_path(dev, p, &p);
if (ret < 0)
@@ -425,7 +425,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
* Tell if a device supports a given PCI capability.
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
- * support it. Possible values for @cap:
+ * support it. Possible values for @cap include:
*
* %PCI_CAP_ID_PM Power Management
* %PCI_CAP_ID_AGP Accelerated Graphics Port
@@ -450,11 +450,11 @@ EXPORT_SYMBOL(pci_find_capability);
/**
* pci_bus_find_capability - query for devices' capabilities
- * @bus: the PCI bus to query
+ * @bus: the PCI bus to query
* @devfn: PCI device to query
- * @cap: capability code
+ * @cap: capability code
*
- * Like pci_find_capability() but works for pci devices that do not have a
+ * Like pci_find_capability() but works for PCI devices that do not have a
* pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
@@ -535,7 +535,7 @@ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
*
* Returns the address of the requested extended capability structure
* within the device's PCI configuration space or 0 if the device does
- * not support it. Possible values for @cap:
+ * not support it. Possible values for @cap include:
*
* %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
* %PCI_EXT_CAP_ID_VC Virtual Channel
@@ -618,12 +618,13 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
- * pci_find_parent_resource - return resource region of parent bus of given region
+ * pci_find_parent_resource - return resource region of parent bus of given
+ * region
* @dev: PCI device structure contains resources to be searched
* @res: child resource record for which parent is sought
*
- * For given resource region of given device, return the resource
- * region of parent bus the given region is contained in.
+ * For given resource region of given device, return the resource region of
+ * parent bus the given region is contained in.
*/
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res)
@@ -800,7 +801,7 @@ static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
- * given PCI device
+ * given PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
*
@@ -826,7 +827,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state < PCI_D0 || state > PCI_D3hot)
return -EINVAL;
- /* Validate current state:
+ /*
+ * Validate current state:
* Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state
*/
@@ -837,14 +839,15 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
}
- /* check if this device supports the desired state */
+ /* Check if this device supports the desired state */
if ((state == PCI_D1 && !dev->d1_support)
|| (state == PCI_D2 && !dev->d2_support))
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- /* If we're (effectively) in D3, force entire word to 0.
+ /*
+ * If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
@@ -867,11 +870,13 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
break;
}
- /* enter specified state */
+ /* Enter specified state */
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- /* Mandatory power management transition delays */
- /* see PCI PM 1.1 5.6.1 table 18 */
+ /*
+ * Mandatory power management transition delays; see PCI PM 1.1
+ * 5.6.1 table 18
+ */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
@@ -1085,16 +1090,18 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
int error;
- /* bound the state we're entering */
+ /* Bound the state we're entering */
if (state > PCI_D3cold)
state = PCI_D3cold;
else if (state < PCI_D0)
state = PCI_D0;
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
+
/*
- * If the device or the parent bridge do not support PCI PM,
- * ignore the request if we're doing anything other than putting
- * it into D0 (which would only happen on boot).
+ * If the device or the parent bridge do not support PCI
+ * PM, ignore the request if we're doing anything other
+ * than putting it into D0 (which would only happen on
+ * boot).
*/
return 0;
@@ -1104,8 +1111,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
__pci_start_power_transition(dev, state);
- /* This device is quirked not to be put into D3, so
- don't put it in D3 */
+ /*
+ * This device is quirked not to be put into D3, so don't put it in
+ * D3
+ */
if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
@@ -1127,12 +1136,11 @@ EXPORT_SYMBOL(pci_set_power_state);
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
* @state: target sleep state for the whole system. This is the value
- * that is passed to suspend() function.
+ * that is passed to suspend() function.
*
* Returns PCI power state suitable for given device and given system
* message.
*/
-
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
pci_power_t ret;
@@ -1310,8 +1318,9 @@ static void pci_restore_ltr_state(struct pci_dev *dev)
}
/**
- * pci_save_state - save the PCI configuration space of a device before suspending
- * @dev: - PCI device that we're dealing with
+ * pci_save_state - save the PCI configuration space of a device before
+ * suspending
+ * @dev: PCI device that we're dealing with
*/
int pci_save_state(struct pci_dev *dev)
{
@@ -1422,7 +1431,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
/**
* pci_restore_state - Restore the saved state of a PCI device
- * @dev: - PCI device that we're dealing with
+ * @dev: PCI device that we're dealing with
*/
void pci_restore_state(struct pci_dev *dev)
{
@@ -1599,8 +1608,8 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
* pci_reenable_device - Resume abandoned device
* @dev: PCI device to be resumed
*
- * Note this function is a backend of pci_default_resume and is not supposed
- * to be called by normal code, write proper resume handler and use it instead.
+ * NOTE: This function is a backend of pci_default_resume() and is not supposed
+ * to be called by normal code, write proper resume handler and use it instead.
*/
int pci_reenable_device(struct pci_dev *dev)
{
@@ -1675,9 +1684,9 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
* pci_enable_device_io - Initialize a device for use with IO space
* @dev: PCI device to be initialized
*
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
*/
int pci_enable_device_io(struct pci_dev *dev)
{
@@ -1689,9 +1698,9 @@ EXPORT_SYMBOL(pci_enable_device_io);
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable Memory resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable Memory resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
*/
int pci_enable_device_mem(struct pci_dev *dev)
{
@@ -1703,12 +1712,12 @@ EXPORT_SYMBOL(pci_enable_device_mem);
* pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O and memory. Wake up the device if it was suspended.
- * Beware, this function can fail.
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
*
- * Note we don't actually enable the device many times if we call
- * this function repeatedly (we just increment the count).
+ * Note we don't actually enable the device many times if we call
+ * this function repeatedly (we just increment the count).
*/
int pci_enable_device(struct pci_dev *dev)
{
@@ -1717,8 +1726,8 @@ int pci_enable_device(struct pci_dev *dev)
EXPORT_SYMBOL(pci_enable_device);
/*
- * Managed PCI resources. This manages device on/off, intx/msi/msix
- * on/off and BAR regions. pci_dev itself records msi/msix status, so
+ * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
+ * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
* there's no need to track it separately. pci_devres is initialized
* when a device is enabled using managed PCI device enable interface.
*/
@@ -1836,7 +1845,8 @@ int __weak pcibios_add_device(struct pci_dev *dev)
}
/**
- * pcibios_release_device - provide arch specific hooks when releasing device dev
+ * pcibios_release_device - provide arch specific hooks when releasing
+ * device dev
* @dev: the PCI device being released
*
* Permits the platform to provide architecture specific functionality when
@@ -1927,8 +1937,7 @@ EXPORT_SYMBOL(pci_disable_device);
* @dev: the PCIe device reset
* @state: Reset state to enter into
*
- *
- * Sets the PCIe reset state for the device. This is the default
+ * Set the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1942,7 +1951,6 @@ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
* @dev: the PCIe device reset
* @state: Reset state to enter into
*
- *
* Sets the PCI reset state for the device.
*/
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
@@ -2339,7 +2347,8 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
}
/**
- * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
+ * pci_prepare_to_sleep - prepare PCI device for system-wide transition
+ * into a sleep state
* @dev: Device to handle.
*
* Choose the power state appropriate for the device depending on whether
@@ -2367,7 +2376,8 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
EXPORT_SYMBOL(pci_prepare_to_sleep);
/**
- * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
+ * pci_back_from_sleep - turn PCI device on during system-wide transition
+ * into working state
* @dev: Device to handle.
*
* Disable device's system wake-up capability and put it into D0.
@@ -2777,14 +2787,14 @@ void pci_pm_init(struct pci_dev *dev)
dev->d2_support = true;
if (dev->d1_support || dev->d2_support)
- pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
+ pci_info(dev, "supports%s%s\n",
dev->d1_support ? " D1" : "",
dev->d2_support ? " D2" : "");
}
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
- pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
+ pci_info(dev, "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -2952,16 +2962,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
res->flags = flags;
if (bei <= PCI_EA_BEI_BAR5)
- pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
bei, res, prop);
else if (bei == PCI_EA_BEI_ROM)
- pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
res, prop);
else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
- pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
bei - PCI_EA_BEI_VF_BAR0, res, prop);
else
- pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
bei, res, prop);
out:
@@ -3005,7 +3015,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
/**
* _pci_add_cap_save_buffer - allocate buffer for saving given
- * capability registers
+ * capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
* @extended: Standard or Extended capability ID
@@ -3186,7 +3196,7 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
}
/**
- * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
*/
static void pci_std_enable_acs(struct pci_dev *dev)
@@ -3609,13 +3619,14 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
EXPORT_SYMBOL_GPL(pci_common_swizzle);
/**
- * pci_release_region - Release a PCI bar
- * @pdev: PCI device whose resources were previously reserved by pci_request_region
- * @bar: BAR to release
+ * pci_release_region - Release a PCI bar
+ * @pdev: PCI device whose resources were previously reserved by
+ * pci_request_region()
+ * @bar: BAR to release
*
- * Releases the PCI I/O and memory resources previously reserved by a
- * successful call to pci_request_region. Call this function only
- * after all use of the PCI regions has ceased.
+ * Releases the PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_region(). Call this function only
+ * after all use of the PCI regions has ceased.
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
@@ -3637,23 +3648,23 @@ void pci_release_region(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL(pci_release_region);
/**
- * __pci_request_region - Reserved PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
- * @exclusive: whether the region access is exclusive or not
+ * __pci_request_region - Reserved PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource.
+ * @exclusive: whether the region access is exclusive or not
*
- * Mark the PCI region associated with PCI device @pdev BR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
*
- * If @exclusive is set, then the region is marked so that userspace
- * is explicitly not allowed to map the resource via /dev/mem or
- * sysfs MMIO access.
+ * If @exclusive is set, then the region is marked so that userspace
+ * is explicitly not allowed to map the resource via /dev/mem or
+ * sysfs MMIO access.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
const char *res_name, int exclusive)
@@ -3687,18 +3698,18 @@ err_out:
}
/**
- * pci_request_region - Reserve PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
@@ -3707,31 +3718,6 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
EXPORT_SYMBOL(pci_request_region);
/**
- * pci_request_region_exclusive - Reserved PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
- *
- * Mark the PCI region associated with PCI device @pdev BR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
- *
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
- *
- * The key difference that _exclusive makes it that userspace is
- * explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
- */
-int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
- const char *res_name)
-{
- return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
-}
-EXPORT_SYMBOL(pci_request_region_exclusive);
-
-/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
* @bars: Bitmask of BARs to be released
@@ -3791,12 +3777,13 @@ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
/**
- * pci_release_regions - Release reserved PCI I/O and memory resources
- * @pdev: PCI device whose resources were previously reserved by pci_request_regions
+ * pci_release_regions - Release reserved PCI I/O and memory resources
+ * @pdev: PCI device whose resources were previously reserved by
+ * pci_request_regions()
*
- * Releases all PCI I/O and memory resources previously reserved by a
- * successful call to pci_request_regions. Call this function only
- * after all use of the PCI regions has ceased.
+ * Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions(). Call this function only
+ * after all use of the PCI regions has ceased.
*/
void pci_release_regions(struct pci_dev *pdev)
@@ -3806,17 +3793,17 @@ void pci_release_regions(struct pci_dev *pdev)
EXPORT_SYMBOL(pci_release_regions);
/**
- * pci_request_regions - Reserved PCI I/O and memory resources
- * @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * pci_request_regions - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
@@ -3825,20 +3812,19 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
EXPORT_SYMBOL(pci_request_regions);
/**
- * pci_request_regions_exclusive - Reserved PCI I/O and memory resources
- * @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved
+ * by owner @res_name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
- * pci_request_regions_exclusive() will mark the region so that
- * /dev/mem and the sysfs MMIO access will not be allowed.
+ * pci_request_regions_exclusive() will mark the region so that /dev/mem
+ * and the sysfs MMIO access will not be allowed.
*
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
+ * Returns 0 on success, or %EBUSY on error. A warning message is also
+ * printed on failure.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
@@ -3849,7 +3835,7 @@ EXPORT_SYMBOL(pci_request_regions_exclusive);
/*
* Record the PCI IO range (expressed as CPU physical address + size).
- * Return a negative value if an error has occured, zero otherwise
+ * Return a negative value if an error has occurred, zero otherwise
*/
int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size)
@@ -3905,14 +3891,14 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
}
/**
- * pci_remap_iospace - Remap the memory mapped I/O space
- * @res: Resource describing the I/O space
- * @phys_addr: physical address of range to be mapped
+ * pci_remap_iospace - Remap the memory mapped I/O space
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
*
- * Remap the memory mapped I/O space described by the @res
- * and the CPU physical address @phys_addr into virtual address space.
- * Only architectures that have memory mapped IO functions defined
- * (and the PCI_IOBASE value defined) should call this function.
+ * Remap the memory mapped I/O space described by the @res and the CPU
+ * physical address @phys_addr into virtual address space. Only
+ * architectures that have memory mapped IO functions defined (and the
+ * PCI_IOBASE value defined) should call this function.
*/
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
@@ -3928,8 +3914,10 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
pgprot_device(PAGE_KERNEL));
#else
- /* this architecture does not have memory mapped I/O space,
- so this function should never be called */
+ /*
+ * This architecture does not have memory mapped I/O space,
+ * so this function should never be called
+ */
WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
return -ENODEV;
#endif
@@ -3937,12 +3925,12 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
EXPORT_SYMBOL(pci_remap_iospace);
/**
- * pci_unmap_iospace - Unmap the memory mapped I/O space
- * @res: resource to be unmapped
+ * pci_unmap_iospace - Unmap the memory mapped I/O space
+ * @res: resource to be unmapped
*
- * Unmap the CPU virtual address @res from virtual address space.
- * Only architectures that have memory mapped IO functions defined
- * (and the PCI_IOBASE value defined) should call this function.
+ * Unmap the CPU virtual address @res from virtual address space. Only
+ * architectures that have memory mapped IO functions defined (and the
+ * PCI_IOBASE value defined) should call this function.
*/
void pci_unmap_iospace(struct resource *res)
{
@@ -4185,7 +4173,7 @@ int pci_set_cacheline_size(struct pci_dev *dev)
if (cacheline_size == pci_cache_line_size)
return 0;
- pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
+ pci_info(dev, "cache line size of %d is not supported\n",
pci_cache_line_size << 2);
return -EINVAL;
@@ -4288,7 +4276,7 @@ EXPORT_SYMBOL(pci_clear_mwi);
* @pdev: the PCI device to operate on
* @enable: boolean: whether to enable or disable PCI INTx
*
- * Enables/disables PCI INTx for device dev
+ * Enables/disables PCI INTx for device @pdev
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
@@ -4364,9 +4352,8 @@ done:
* pci_check_and_mask_intx - mask INTx on pending interrupt
* @dev: the PCI device to operate on
*
- * Check if the device dev has its INTx line asserted, mask it and
- * return true in that case. False is returned if no interrupt was
- * pending.
+ * Check if the device @dev has its INTx line asserted, mask it and return
+ * true in that case. False is returned if no interrupt was pending.
*/
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
@@ -4378,9 +4365,9 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
* pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
* @dev: the PCI device to operate on
*
- * Check if the device dev has its INTx line asserted, unmask it if not
- * and return true. False is returned and the mask remains active if
- * there was still an interrupt pending.
+ * Check if the device @dev has its INTx line asserted, unmask it if not and
+ * return true. False is returned and the mask remains active if there was
+ * still an interrupt pending.
*/
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
@@ -4389,7 +4376,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
/**
- * pci_wait_for_pending_transaction - waits for pending transaction
+ * pci_wait_for_pending_transaction - wait for pending transaction
* @dev: the PCI device to operate on
*
 * Return 0 if transaction is pending, 1 otherwise.
@@ -4447,7 +4434,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
/**
* pcie_has_flr - check if a device supports function level resets
- * @dev: device to check
+ * @dev: device to check
*
* Returns true if the device advertises support for PCIe function level
* resets.
@@ -4466,7 +4453,7 @@ EXPORT_SYMBOL_GPL(pcie_has_flr);
/**
* pcie_flr - initiate a PCIe function level reset
- * @dev: device to reset
+ * @dev: device to reset
*
* Initiate a function level reset on @dev. The caller should ensure the
* device supports FLR before calling this function, e.g. by using the
@@ -4810,6 +4797,7 @@ static void pci_dev_restore(struct pci_dev *dev)
*
* The device function is presumed to be unused and the caller is holding
* the device mutex lock when this function is called.
+ *
* Resetting the device will make the contents of PCI configuration space
* random, so any caller of this must be prepared to reinitialise the
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
@@ -5373,8 +5361,8 @@ EXPORT_SYMBOL_GPL(pci_reset_bus);
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
*
- * Returns mmrbc: maximum designed memory read count in bytes
- * or appropriate error value.
+ * Returns mmrbc: maximum designed memory read count in bytes or
+ * appropriate error value.
*/
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
@@ -5396,8 +5384,8 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
* pcix_get_mmrbc - get PCI-X maximum memory read byte count
* @dev: PCI device to query
*
- * Returns mmrbc: maximum memory read count in bytes
- * or appropriate error value.
+ * Returns mmrbc: maximum memory read count in bytes or appropriate error
+ * value.
*/
int pcix_get_mmrbc(struct pci_dev *dev)
{
@@ -5421,7 +5409,7 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
* @mmrbc: maximum memory read count in bytes
* valid values are 512, 1024, 2048, 4096
*
- * If possible sets maximum memory read byte count, some bridges have erratas
+ * If possible, sets the maximum memory read byte count; some bridges have errata
* that prevent this.
*/
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
@@ -5466,8 +5454,7 @@ EXPORT_SYMBOL(pcix_set_mmrbc);
* pcie_get_readrq - get PCI Express read request size
* @dev: PCI device to query
*
- * Returns maximum memory read request in bytes
- * or appropriate error value.
+ * Returns maximum memory read request in bytes or appropriate error value.
*/
int pcie_get_readrq(struct pci_dev *dev)
{
@@ -5495,10 +5482,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
return -EINVAL;
/*
- * If using the "performance" PCIe config, we clamp the
- * read rq size to the max packet size to prevent the
- * host bridge generating requests larger than we can
- * cope with
+ * If using the "performance" PCIe config, we clamp the read rq
+ * size to the max packet size to keep the host bridge from
+ * generating requests larger than we can cope with.
*/
if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
int mps = pcie_get_mps(dev);
@@ -6144,6 +6130,7 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
if (parent)
domain = of_get_pci_domain_nr(parent->of_node);
+
/*
* Check DT domain and use_dt_domains values.
*
@@ -6264,8 +6251,7 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
disable_acs_redir_param = str + 18;
} else {
- printk(KERN_ERR "PCI: Unknown option `%s'\n",
- str);
+ pr_err("PCI: Unknown option `%s'\n", str);
}
}
str = k;
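A minimal standalone sketch of the clamping rule in the pcie_set_readrq() hunk above may help here; clamp_readrq() is a made-up helper and the numbers are illustrative, but the policy is the one the reworded comment describes: under the "performance" bus configuration the requested Max Read Request Size is capped at the device's Max Payload Size.

#include <assert.h>
#include <stdio.h>

/* Illustration only, not the kernel's pcie_set_readrq() */
static int clamp_readrq(int rq, int mps, int performance_mode)
{
    /* The spec allows MRRS values of 128..4096 bytes, powers of two */
    assert(rq >= 128 && rq <= 4096 && (rq & (rq - 1)) == 0);

    /* "performance" config: never request more than the link payload */
    if (performance_mode && rq > mps)
        rq = mps;
    return rq;
}

int main(void)
{
    printf("%d\n", clamp_readrq(4096, 256, 1));  /* prints 256 */
    printf("%d\n", clamp_readrq(4096, 256, 0));  /* prints 4096 */
    return 0;
}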
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d994839a3e24..9cb99380c61e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -597,7 +597,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
+static inline void pci_aer_init(struct pci_dev *d) { }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
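For readers unfamiliar with the stub convention being adjusted here, the sketch below shows the general pattern in isolation; CONFIG_FEATURE_X, struct device and feature_init() are invented for the example. When the feature is compiled out, an empty static inline stub keeps every caller free of #ifdefs, and a void stub, which is what pci_aer_init() becomes in this hunk, avoids pretending to return a meaningful error code.

#include <stdio.h>

struct device { const char *name; };    /* stand-in, not the kernel's struct device */

#ifdef CONFIG_FEATURE_X
static void feature_init(struct device *dev)
{
    printf("initializing feature for %s\n", dev->name);
}
#else
/* Feature compiled out: empty stub, so callers need no #ifdef of their own */
static inline void feature_init(struct device *dev) { (void)dev; }
#endif

int main(void)
{
    struct device d = { "dev0" };

    feature_init(&d);    /* compiles and links either way */
    return 0;
}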
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index f8fc2114ad39..b45bc47d04fe 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -12,6 +12,9 @@
* Andrew Patterson <andrew.patterson@hp.com>
*/
+#define pr_fmt(fmt) "AER: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
@@ -779,10 +782,11 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
u8 bus = info->id >> 8;
u8 devfn = info->id & 0xff;
- pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
- info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -964,8 +968,7 @@ static bool find_source_device(struct pci_dev *parent,
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
if (!e_info->error_dev_num) {
- pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
- e_info->id);
+ pci_info(parent, "can't find device of ID%04x\n", e_info->id);
return false;
}
return true;
@@ -1377,25 +1380,24 @@ static int aer_probe(struct pcie_device *dev)
int status;
struct aer_rpc *rpc;
struct device *device = &dev->device;
+ struct pci_dev *port = dev->port;
rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
- if (!rpc) {
- dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
+ if (!rpc)
return -ENOMEM;
- }
- rpc->rpd = dev->port;
+
+ rpc->rpd = port;
set_service_data(dev, rpc);
status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
IRQF_SHARED, "aerdrv", dev);
if (status) {
- dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
- dev->irq);
+ pci_err(port, "request AER IRQ %d failed\n", dev->irq);
return status;
}
aer_enable_rootport(rpc);
- dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
+ pci_info(port, "enabled with IRQ %d\n", dev->irq);
return 0;
}
@@ -1419,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
rc = pci_bus_error_reset(dev);
- pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
+ pci_info(dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
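The pr_fmt()/dev_fmt() definitions added at the top of aer.c are what let the individual messages above drop their hand-written "AER: " prefixes. A compressed userspace sketch of that mechanism follows; log_info() is a stand-in for pr_info()/pci_info(), and the ## variadic form is the same GNU extension the kernel itself relies on.

#include <stdio.h>

#define pr_fmt(fmt) "AER: " fmt
#define log_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
    /* Prints "AER: enabled with IRQ 16"; the prefix comes from pr_fmt() */
    log_info("enabled with IRQ %d\n", 16);
    return 0;
}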
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 95d4759664b3..043b8b0cfcc5 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -12,6 +12,8 @@
* Huang Ying <ying.huang@intel.com>
*/
+#define dev_fmt(fmt) "aer_inject: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
@@ -332,14 +334,14 @@ static int aer_inject(struct aer_error_inj *einj)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
- pci_err(dev, "aer_inject: Root port not found\n");
+ pci_err(dev, "Root port not found\n");
ret = -ENODEV;
goto out_put;
}
pos_cap_err = dev->aer_cap;
if (!pos_cap_err) {
- pci_err(dev, "aer_inject: Device doesn't support AER\n");
+ pci_err(dev, "Device doesn't support AER\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
@@ -350,7 +352,7 @@ static int aer_inject(struct aer_error_inj *einj)
rp_pos_cap_err = rpdev->aer_cap;
if (!rp_pos_cap_err) {
- pci_err(rpdev, "aer_inject: Root port doesn't support AER\n");
+ pci_err(rpdev, "Root port doesn't support AER\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
@@ -398,14 +400,14 @@ static int aer_inject(struct aer_error_inj *einj)
if (!aer_mask_override && einj->cor_status &&
!(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
- pci_warn(dev, "aer_inject: The correctable error(s) is masked by device\n");
+ pci_warn(dev, "The correctable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
if (!aer_mask_override && einj->uncor_status &&
!(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
- pci_warn(dev, "aer_inject: The uncorrectable error(s) is masked by device\n");
+ pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
@@ -460,19 +462,17 @@ static int aer_inject(struct aer_error_inj *einj)
if (device) {
edev = to_pcie_device(device);
if (!get_service_data(edev)) {
- dev_warn(&edev->device,
- "aer_inject: AER service is not initialized\n");
+ pci_warn(edev->port, "AER service is not initialized\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
- dev_info(&edev->device,
- "aer_inject: Injecting errors %08x/%08x into device %s\n",
+ pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
local_irq_disable();
generic_handle_irq(edev->irq);
local_irq_enable();
} else {
- pci_err(rpdev, "aer_inject: AER device not found\n");
+ pci_err(rpdev, "AER device not found\n");
ret = -ENODEV;
}
out_put:
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 727e3c1ef9a4..fd4cb75088f9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -196,6 +196,36 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+ struct pci_dev *parent = link->pdev;
+ unsigned long end_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ if (parent->clear_retrain_link) {
+ /*
+ * Due to an erratum in some devices, the Retrain Link bit
+ * needs to be cleared again manually to allow the link
+ * training to succeed.
+ */
+ reg16 &= ~PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Wait for link training to end. Break out after waiting for the timeout. */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+ msleep(1);
+ } while (time_before(jiffies, end_jiffies));
+ return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -205,7 +235,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
u16 reg16, parent_reg, child_reg[8];
- unsigned long start_jiffies;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -263,21 +292,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
- /* Retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end. Break out after waiting for timeout */
- start_jiffies = jiffies;
- for (;;) {
- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
- break;
- msleep(1);
- }
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ if (pcie_retrain_link(link))
return;
/* Training failed. Restore common clock configurations */
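To make the factored-out retrain loop easier to follow, here is a userspace sketch of the same control flow: set the Retrain Link bit, clear it again for parts with the erratum flagged by clear_retrain_link, then poll Link Training against a deadline. Register access is simulated; fake_lnksta, retrain_link() and the one-second budget are inventions of this sketch, and only the shape mirrors pcie_retrain_link().

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define LNKCTL_RL 0x0020    /* Retrain Link, same bit as PCI_EXP_LNKCTL_RL */
#define LNKSTA_LT 0x0800    /* Link Training, same bit as PCI_EXP_LNKSTA_LT */

static unsigned int fake_lnksta;    /* simulated Link Status register */

static bool retrain_link(bool clear_retrain_link)
{
    unsigned int lnkctl = 0;
    time_t deadline = time(NULL) + 1;   /* coarse stand-in for LINK_RETRAIN_TIMEOUT */

    lnkctl |= LNKCTL_RL;                /* kick off retraining */
    if (clear_retrain_link)
        lnkctl &= ~LNKCTL_RL;           /* erratum workaround: clear the bit again */
    (void)lnkctl;                       /* a real implementation writes this to LNKCTL */

    do {
        if (!(fake_lnksta & LNKSTA_LT)) /* training finished */
            return true;
    } while (time(NULL) < deadline);

    return !(fake_lnksta & LNKSTA_LT);  /* one last look after the timeout */
}

int main(void)
{
    fake_lnksta = 0;    /* pretend training completed immediately */
    printf("retrain %s\n", retrain_link(true) ? "succeeded" : "timed out");
    return 0;
}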
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index 4fa9e3523ee1..77e685771487 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -107,11 +107,25 @@ static void pcie_bandwidth_notification_remove(struct pcie_device *srv)
free_irq(srv->irq, srv);
}
+static int pcie_bandwidth_notification_suspend(struct pcie_device *srv)
+{
+ pcie_disable_link_bandwidth_notification(srv->port);
+ return 0;
+}
+
+static int pcie_bandwidth_notification_resume(struct pcie_device *srv)
+{
+ pcie_enable_link_bandwidth_notification(srv->port);
+ return 0;
+}
+
static struct pcie_port_service_driver pcie_bandwidth_notification_driver = {
.name = "pcie_bw_notification",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_BWNOTIF,
.probe = pcie_bandwidth_notification_probe,
+ .suspend = pcie_bandwidth_notification_suspend,
+ .resume = pcie_bandwidth_notification_resume,
.remove = pcie_bandwidth_notification_remove,
};
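The two new callbacks slot into the service driver's ops table, and the port driver core only invokes a hook when the pointer is set, so older services without suspend/resume keep working unchanged. A generic illustration of that optional-callback pattern follows; struct service_driver and the bw_* functions are invented for the example, not the kernel's pcie_port_service_driver.

#include <stdio.h>

struct service_driver {
    const char *name;
    int (*probe)(void);
    int (*suspend)(void);   /* optional */
    int (*resume)(void);    /* optional */
};

static int bw_probe(void)   { puts("enable notifications");    return 0; }
static int bw_suspend(void) { puts("disable notifications");   return 0; }
static int bw_resume(void)  { puts("re-enable notifications"); return 0; }

static const struct service_driver bw_driver = {
    .name    = "pcie_bw_notification",
    .probe   = bw_probe,
    .suspend = bw_suspend,
    .resume  = bw_resume,
};

int main(void)
{
    bw_driver.probe();
    if (bw_driver.suspend)  /* the core calls a hook only if it is set */
        bw_driver.suspend();
    if (bw_driver.resume)
        bw_driver.resume();
    return 0;
}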
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 7b77754a82de..a32ec3487a8d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -6,6 +6,8 @@
* Copyright (C) 2016 Intel Corp.
*/
+#define dev_fmt(fmt) "DPC: " fmt
+
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -100,7 +102,6 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
{
unsigned long timeout = jiffies + HZ;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
u16 cap = dpc->cap_pos, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
@@ -110,7 +111,7 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
}
if (status & PCI_EXP_DPC_RP_BUSY) {
- dev_warn(dev, "DPC root port still busy\n");
+ pci_warn(pdev, "root port still busy\n");
return -EBUSY;
}
return 0;
@@ -148,7 +149,6 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
- struct device *dev = &dpc->dev->device;
struct pci_dev *pdev = dpc->dev->port;
u16 cap = dpc->cap_pos, dpc_status, first_error;
u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
@@ -156,13 +156,13 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
- dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
+ pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
status, mask);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
- dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
+ pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
sev, syserr, exc);
/* Get First Error Pointer */
@@ -171,7 +171,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
if ((status & ~mask) & (1 << i))
- dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
+ pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
first_error == i ? " (First)" : "");
}
@@ -185,18 +185,18 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
&dw2);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
&dw3);
- dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n",
+ pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
dw0, dw1, dw2, dw3);
if (dpc->rp_log_size < 5)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
- dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log);
+ pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
for (i = 0; i < dpc->rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
- dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
+ pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
@@ -229,18 +229,17 @@ static irqreturn_t dpc_handler(int irq, void *context)
struct aer_err_info info;
struct dpc_dev *dpc = context;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
- dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n",
+ pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
status, source);
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
- dev_warn(dev, "DPC %s detected\n",
+ pci_warn(pdev, "%s detected\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
(reason == 2) ? "ERR_FATAL" :
@@ -307,7 +306,7 @@ static int dpc_probe(struct pcie_device *dev)
dpc_handler, IRQF_SHARED,
"pcie-dpc", dpc);
if (status) {
- dev_warn(device, "request IRQ%d failed: %d\n", dev->irq,
+ pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
status);
return status;
}
@@ -319,7 +318,7 @@ static int dpc_probe(struct pcie_device *dev)
if (dpc->rp_extensions) {
dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) {
- dev_err(device, "RP PIO log size %u is invalid\n",
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
dpc->rp_log_size);
dpc->rp_log_size = 0;
}
@@ -328,11 +327,11 @@ static int dpc_probe(struct pcie_device *dev)
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
- dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
- cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
- FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
- FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
- FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+ pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
+ cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
+ FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
+ FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
+ FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
return status;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 54d593d10396..f38e6c19dd50 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -7,6 +7,8 @@
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*/
+#define dev_fmt(fmt) "PME: " fmt
+
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -194,14 +196,14 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
- pci_dbg(port, "PME interrupt generated for non-existent device %02x:%02x.%d\n",
- busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
+ busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
out:
if (!found)
- pci_dbg(port, "Spurious native PME interrupt!\n");
+ pci_info(port, "Spurious native interrupt!\n");
}
/**
@@ -341,7 +343,7 @@ static int pcie_pme_probe(struct pcie_device *srv)
return ret;
}
- pci_info(port, "Signaling PME with IRQ %d\n", srv->irq);
+ pci_info(port, "Signaling with IRQ %d\n", srv->irq);
pcie_pme_mark_devices(port);
pcie_pme_interrupt_enable(port, true);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7e12d0163863..0e8e2c186f50 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -317,7 +317,7 @@ fail:
res->flags = 0;
out:
if (res->flags)
- pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res);
+ pci_info(dev, "reg 0x%x: %pR\n", pos, res);
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
@@ -435,7 +435,7 @@ static void pci_read_bridge_io(struct pci_bus *child)
region.start = base;
region.end = limit + io_granularity - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
+ pci_info(dev, " bridge window %pR\n", res);
}
}
@@ -457,7 +457,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child)
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
+ pci_info(dev, " bridge window %pR\n", res);
}
}
@@ -510,7 +510,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
+ pci_info(dev, " bridge window %pR\n", res);
}
}
@@ -540,8 +540,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
if (res && res->flags) {
pci_bus_add_resource(child, res,
PCI_SUBTRACTIVE_DECODE);
- pci_printk(KERN_DEBUG, dev,
- " bridge window %pR (subtractive decode)\n",
+ pci_info(dev, " bridge window %pR (subtractive decode)\n",
res);
}
}
@@ -586,16 +585,10 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(to_pci_host_bridge(dev));
}
-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
- struct pci_host_bridge *bridge;
-
- bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
- if (!bridge)
- return NULL;
-
INIT_LIST_HEAD(&bridge->windows);
- bridge->dev.release = pci_release_host_bridge_dev;
+ INIT_LIST_HEAD(&bridge->dma_ranges);
/*
* We assume we can manage these PCIe features. Some systems may
@@ -608,6 +601,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
bridge->native_ltr = 1;
+}
+
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ pci_init_host_bridge(bridge);
+ bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -622,7 +627,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
if (!bridge)
return NULL;
- INIT_LIST_HEAD(&bridge->windows);
+ pci_init_host_bridge(bridge);
bridge->dev.release = devm_pci_release_host_bridge_dev;
return bridge;
@@ -632,6 +637,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
pci_free_resource_list(&bridge->windows);
+ pci_free_resource_list(&bridge->dma_ranges);
kfree(bridge);
}
@@ -1081,6 +1087,36 @@ static void pci_enable_crs(struct pci_dev *pdev)
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
unsigned int available_buses);
+/**
+ * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus
+ * numbers from EA capability.
+ * @dev: Bridge
+ * @sec: updated with secondary bus number from EA
+ * @sub: updated with subordinate bus number from EA
+ *
+ * If @dev is a bridge with EA capability, update @sec and @sub with
+ * fixed bus numbers from the capability and return true. Otherwise,
+ * return false.
+ */
+static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
+{
+ int ea, offset;
+ u32 dw;
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ return false;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return false;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+ pci_read_config_dword(dev, offset, &dw);
+ *sec = dw & PCI_EA_SEC_BUS_MASK;
+ *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ return true;
+}
/*
* pci_scan_bridge_extend() - Scan buses behind a bridge
@@ -1115,6 +1151,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
u16 bctl;
u8 primary, secondary, subordinate;
int broken = 0;
+ bool fixed_buses;
+ u8 fixed_sec, fixed_sub;
+ int next_busnr;
/*
* Make sure the bridge is powered on to be able to access config
@@ -1214,17 +1253,24 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
+ /* Read bus numbers from EA Capability (if present) */
+ fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub);
+ if (fixed_buses)
+ next_busnr = fixed_sec;
+ else
+ next_busnr = max + 1;
+
/*
* Prevent assigning a bus number that already exists.
* This can happen when a bridge is hot-plugged, so in this
* case we only re-scan this bus.
*/
- child = pci_find_bus(pci_domain_nr(bus), max+1);
+ child = pci_find_bus(pci_domain_nr(bus), next_busnr);
if (!child) {
- child = pci_add_new_bus(bus, dev, max+1);
+ child = pci_add_new_bus(bus, dev, next_busnr);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1,
+ pci_bus_insert_busn_res(child, next_busnr,
bus->busn_res.end);
}
max++;
@@ -1285,7 +1331,13 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
max += i;
}
- /* Set subordinate bus number to its real value */
+ /*
+ * Set subordinate bus number to its real value.
+ * If a fixed subordinate bus number exists in the EA
+ * capability, use it instead.
+ */
+ if (fixed_buses)
+ max = fixed_sub;
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -1690,7 +1742,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->revision = class & 0xff;
dev->class = class >> 8; /* upper 3 bytes */
- pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
if (pci_early_dump)
@@ -2026,6 +2078,119 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
+static u16 hpx3_device_type(struct pci_dev *dev)
+{
+ u16 pcie_type = pci_pcie_type(dev);
+ const int pcie_to_hpx3_type[] = {
+ [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
+ [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
+ [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
+ [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
+ [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
+ [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
+ [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
+ [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
+ [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
+ };
+
+ if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
+ return 0;
+
+ return pcie_to_hpx3_type[pcie_type];
+}
+
+static u8 hpx3_function_type(struct pci_dev *dev)
+{
+ if (dev->is_virtfn)
+ return HPX_FN_SRIOV_VIRT;
+ else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
+ return HPX_FN_SRIOV_PHYS;
+ else
+ return HPX_FN_NORMAL;
+}
+
+static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
+{
+ u8 cap_ver = hpx3_cap_id & 0xf;
+
+ if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
+ return true;
+ else if (cap_ver == pcie_cap_id)
+ return true;
+
+ return false;
+}
+
+static void program_hpx_type3_register(struct pci_dev *dev,
+ const struct hpx_type3 *reg)
+{
+ u32 match_reg, write_reg, header, orig_value;
+ u16 pos;
+
+ if (!(hpx3_device_type(dev) & reg->device_type))
+ return;
+
+ if (!(hpx3_function_type(dev) & reg->function_type))
+ return;
+
+ switch (reg->config_space_location) {
+ case HPX_CFG_PCICFG:
+ pos = 0;
+ break;
+ case HPX_CFG_PCIE_CAP:
+ pos = pci_find_capability(dev, reg->pci_exp_cap_id);
+ if (pos == 0)
+ return;
+
+ break;
+ case HPX_CFG_PCIE_CAP_EXT:
+ pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
+ if (pos == 0)
+ return;
+
+ pci_read_config_dword(dev, pos, &header);
+ if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
+ reg->pci_exp_cap_ver))
+ return;
+
+ break;
+ case HPX_CFG_VEND_CAP: /* Fall through */
+ case HPX_CFG_DVSEC: /* Fall through */
+ default:
+ pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
+ return;
+ }
+
+ pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
+
+ if ((match_reg & reg->match_mask_and) != reg->match_value)
+ return;
+
+ pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
+ orig_value = write_reg;
+ write_reg &= reg->reg_mask_and;
+ write_reg |= reg->reg_mask_or;
+
+ if (orig_value == write_reg)
+ return;
+
+ pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
+
+ pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
+ pos, orig_value, write_reg);
+}
+
+static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx3)
+{
+ if (!hpx3)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ program_hpx_type3_register(dev, hpx3);
+}
+
int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
{
struct pci_host_bridge *host;
@@ -2206,8 +2371,12 @@ static void pci_configure_serr(struct pci_dev *dev)
static void pci_configure_device(struct pci_dev *dev)
{
- struct hotplug_params hpp;
- int ret;
+ static const struct hotplug_program_ops hp_ops = {
+ .program_type0 = program_hpp_type0,
+ .program_type1 = program_hpp_type1,
+ .program_type2 = program_hpp_type2,
+ .program_type3 = program_hpx_type3,
+ };
pci_configure_mps(dev);
pci_configure_extended_tags(dev, NULL);
@@ -2216,14 +2385,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
- memset(&hpp, 0, sizeof(hpp));
- ret = pci_get_hp_params(dev, &hpp);
- if (ret)
- return;
-
- program_hpp_type2(dev, hpp.t2);
- program_hpp_type1(dev, hpp.t1);
- program_hpp_type0(dev, hpp.t0);
+ pci_acpi_program_hp_params(dev, &hp_ops);
}
static void pci_release_capabilities(struct pci_dev *dev)
@@ -3086,7 +3248,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
conflict = request_resource_conflict(parent_res, res);
if (conflict)
- dev_printk(KERN_DEBUG, &b->dev,
+ dev_info(&b->dev,
"busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
res, pci_is_root_bus(b) ? "domain " : "",
parent_res, conflict->name, conflict);
@@ -3106,8 +3268,7 @@ int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
size = bus_max - res->start + 1;
ret = adjust_resource(res, res->start, size);
- dev_printk(KERN_DEBUG, &b->dev,
- "busn_res: %pR end %s updated to %02x\n",
+ dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n",
&old_res, ret ? "can not be" : "is", bus_max);
if (!ret && !res->parent)
@@ -3125,8 +3286,7 @@ void pci_bus_release_busn_res(struct pci_bus *b)
return;
ret = release_resource(res);
- dev_printk(KERN_DEBUG, &b->dev,
- "busn_res: %pR %s released\n",
+ dev_info(&b->dev, "busn_res: %pR %s released\n",
res, ret ? "can not be" : "is");
}
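Several of the probe.c additions boil down to the guarded read-modify-write rule that program_hpx_type3_register() implements: apply a register change only when a match read qualifies, and skip the write when it would be a no-op. A standalone sketch of just that arithmetic follows; struct hpx3_rule and apply_rule() are names made up for the example, and config-space access is simulated with plain integers.

#include <stdint.h>
#include <stdio.h>

struct hpx3_rule {
    uint32_t match_mask, match_value;   /* when the rule applies */
    uint32_t reg_mask_and, reg_mask_or; /* bits to keep, bits to set */
};

static uint32_t apply_rule(uint32_t match_reg, uint32_t reg,
                           const struct hpx3_rule *rule)
{
    uint32_t newval;

    if ((match_reg & rule->match_mask) != rule->match_value)
        return reg;                     /* rule does not apply to this device */

    newval = (reg & rule->reg_mask_and) | rule->reg_mask_or;
    if (newval == reg)
        return reg;                     /* nothing to change, skip the write */

    printf("0x%08x -> 0x%08x\n", reg, newval);
    return newval;
}

int main(void)
{
    /* Example rule: if bit 0 of the match register is set, force bits 8-9 on */
    struct hpx3_rule rule = { 0x1, 0x1, 0xffffffff, 0x300 };

    apply_rule(0x1, 0x00000040, &rule); /* applies, writes 0x00000340 */
    apply_rule(0x0, 0x00000040, &rule); /* match fails, register untouched */
    return 0;
}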
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 6fa1627ce08d..445b51db75b0 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -222,6 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
}
/* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
+ /* fall through */
default:
ret = -EINVAL;
break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index eb0afc275901..0f16acc323c6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -159,8 +159,7 @@ static int __init pci_apply_final_quirks(void)
u8 tmp;
if (pci_cache_line_size)
- printk(KERN_DEBUG "PCI: CLS %u bytes\n",
- pci_cache_line_size << 2);
+ pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);
pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
@@ -177,16 +176,16 @@ static int __init pci_apply_final_quirks(void)
if (!tmp || cls == tmp)
continue;
- printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
- cls << 2, tmp << 2,
- pci_dfl_cache_line_size << 2);
+ pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
+ pci_dfl_cache_line_size << 2);
pci_cache_line_size = pci_dfl_cache_line_size;
}
}
if (!pci_cache_line_size) {
- printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
- cls << 2, pci_dfl_cache_line_size << 2);
+ pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
+ pci_dfl_cache_line_size << 2);
pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
}
@@ -2245,6 +2244,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+ dev->clear_retrain_link = 1;
+ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
static void fixup_rev1_53c810(struct pci_dev *dev)
{
u32 class = dev->class;
@@ -2596,7 +2612,7 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
- printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n");
+ pr_info("Rewriting IRQ routing register on MCP55\n");
cfg &= ~((1 << 2) | (1 << 15));
pci_write_config_dword(dev, 0x74, cfg);
}
@@ -3408,6 +3424,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
/*
* Root port on some Cavium CN8xxx chips do not successfully complete a bus
@@ -4905,6 +4922,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5122,3 +5140,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
+
+/*
+ * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
+ * not always reset the secondary Nvidia GPU between reboots if the system
+ * is configured to use Hybrid Graphics mode. This results in the GPU
+ * being left in whatever state it was in during the *previous* boot, which
+ * causes spurious interrupts from the GPU, which in turn causes us to
+ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
+ * this also completely breaks nouveau.
+ *
+ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
+ * clean state and fixes all these issues.
+ *
+ * When the machine is configured in Dedicated display mode, the issue
+ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
+ * mode, so we can detect that and avoid resetting it.
+ */
+static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
+{
+ void __iomem *map;
+ int ret;
+
+ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
+ pdev->subsystem_device != 0x222e ||
+ !pdev->reset_fn)
+ return;
+
+ if (pci_enable_device_mem(pdev))
+ return;
+
+ /*
+ * Based on nvkm_device_ctor() in
+ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+ */
+ map = pci_iomap(pdev, 0, 0x23000);
+ if (!map) {
+ pci_err(pdev, "Can't map MMIO space\n");
+ goto out_disable;
+ }
+
+ /*
+ * Make sure the GPU looks like it's been POSTed before resetting
+ * it.
+ */
+ if (ioread32(map + 0x2240c) & 0x2) {
+ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
+ ret = pci_reset_function(pdev);
+ if (ret < 0)
+ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
+ }
+
+ iounmap(map);
+out_disable:
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+ PCI_CLASS_DISPLAY_VGA, 8,
+ quirk_reset_lenovo_thinkpad_p50_nvgpu);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 2b5f720862d3..5c7922612733 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -33,7 +33,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
struct pci_bus *bus;
int ret;
- ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+ ret = fn(pdev, pci_dev_id(pdev), data);
if (ret)
return ret;
@@ -88,9 +88,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
return ret;
continue;
case PCI_EXP_TYPE_PCIE_BRIDGE:
- ret = fn(tmp,
- PCI_DEVID(tmp->bus->number,
- tmp->devfn), data);
+ ret = fn(tmp, pci_dev_id(tmp), data);
if (ret)
return ret;
continue;
@@ -101,9 +99,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
PCI_DEVID(tmp->subordinate->number,
PCI_DEVFN(0, 0)), data);
else
- ret = fn(tmp,
- PCI_DEVID(tmp->bus->number,
- tmp->devfn), data);
+ ret = fn(tmp, pci_dev_id(tmp), data);
if (ret)
return ret;
}
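pci_dev_id() is only a convenience wrapper around the PCI_DEVID() encoding that the replaced call sites spelled out by hand. For reference, the sketch below reproduces that 16-bit requester-ID layout; the two macros mirror their kernel definitions, while the bus, device and function numbers are arbitrary.

#include <stdio.h>

#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_DEVID(bus, devfn)  ((((unsigned short)(bus)) << 8) | (devfn))

int main(void)
{
    unsigned char bus = 0x3a;
    unsigned char devfn = PCI_DEVFN(0x1c, 2);   /* device 0x1c, function 2 */

    /* 0000:3a:1c.2 has requester ID 0x3ae2: bus in the high byte, devfn low */
    printf("requester ID: 0x%04x\n", PCI_DEVID(bus, devfn));
    return 0;
}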
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index ec44a0f3a7ac..0cdd5ff389de 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -49,17 +49,15 @@ static void free_list(struct list_head *head)
}
/**
- * add_to_list() - add a new resource tracker to the list
+ * add_to_list() - Add a new resource tracker to the list
* @head: Head of the list
- * @dev: device corresponding to which the resource
- * belongs
- * @res: The resource to be tracked
- * @add_size: additional size to be optionally added
- * to the resource
+ * @dev: Device to which the resource belongs
+ * @res: Resource to be tracked
+ * @add_size: Additional size to be optionally added to the resource
*/
-static int add_to_list(struct list_head *head,
- struct pci_dev *dev, struct resource *res,
- resource_size_t add_size, resource_size_t min_align)
+static int add_to_list(struct list_head *head, struct pci_dev *dev,
+ struct resource *res, resource_size_t add_size,
+ resource_size_t min_align)
{
struct pci_dev_resource *tmp;
@@ -80,8 +78,7 @@ static int add_to_list(struct list_head *head,
return 0;
}
-static void remove_from_list(struct list_head *head,
- struct resource *res)
+static void remove_from_list(struct list_head *head, struct resource *res)
{
struct pci_dev_resource *dev_res, *tmp;
@@ -158,7 +155,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
tmp->res = r;
tmp->dev = dev;
- /* fallback is smallest one or list is empty*/
+ /* Fallback is smallest one or list is empty */
n = head;
list_for_each_entry(dev_res, head, list) {
resource_size_t align;
@@ -171,21 +168,20 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
break;
}
}
- /* Insert it just before n*/
+ /* Insert it just before n */
list_add_tail(&tmp->list, n);
}
}
-static void __dev_sort_resources(struct pci_dev *dev,
- struct list_head *head)
+static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
u16 class = dev->class >> 8;
- /* Don't touch classless devices or host bridges or ioapics. */
+ /* Don't touch classless devices or host bridges or IOAPICs */
if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
return;
- /* Don't touch ioapic devices already enabled by firmware */
+ /* Don't touch IOAPIC devices already enabled by firmware */
if (class == PCI_CLASS_SYSTEM_PIC) {
u16 command;
pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -204,19 +200,18 @@ static inline void reset_resource(struct resource *res)
}
/**
- * reassign_resources_sorted() - satisfy any additional resource requests
+ * reassign_resources_sorted() - Satisfy any additional resource requests
*
- * @realloc_head : head of the list tracking requests requiring additional
- * resources
- * @head : head of the list tracking requests with allocated
- * resources
+ * @realloc_head: Head of the list tracking requests requiring
+ * additional resources
+ * @head: Head of the list tracking requests with allocated
+ * resources
*
- * Walk through each element of the realloc_head and try to procure
- * additional resources for the element, provided the element
- * is in the head list.
+ * Walk through each element of the realloc_head and try to procure additional
+ * resources for the element, provided the element is in the head list.
*/
static void reassign_resources_sorted(struct list_head *realloc_head,
- struct list_head *head)
+ struct list_head *head)
{
struct resource *res;
struct pci_dev_resource *add_res, *tmp;
@@ -228,18 +223,18 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
- /* skip resource that has been reset */
+ /* Skip resource that has been reset */
if (!res->flags)
goto out;
- /* skip this resource if not found in head list */
+ /* Skip this resource if not found in head list */
list_for_each_entry(dev_res, head, list) {
if (dev_res->res == res) {
found_match = true;
break;
}
}
- if (!found_match)/* just skip */
+ if (!found_match) /* Just skip */
continue;
idx = res - &add_res->dev->resource[0];
@@ -255,10 +250,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
if (pci_reassign_resource(add_res->dev, idx,
add_size, align))
- pci_printk(KERN_DEBUG, add_res->dev,
- "failed to add %llx res[%d]=%pR\n",
- (unsigned long long)add_size,
- idx, res);
+ pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
+ (unsigned long long) add_size, idx,
+ res);
}
out:
list_del(&add_res->list);
@@ -267,14 +261,14 @@ out:
}
/**
- * assign_requested_resources_sorted() - satisfy resource requests
+ * assign_requested_resources_sorted() - Satisfy resource requests
*
- * @head : head of the list tracking requests for resources
- * @fail_head : head of the list tracking requests that could
- * not be allocated
+ * @head: Head of the list tracking requests for resources
+ * @fail_head: Head of the list tracking requests that could not be
+ * allocated
*
- * Satisfy resource requests of each element in the list. Add
- * requests that could not satisfied to the failed_list.
+ * Satisfy resource requests of each element in the list. Add requests that
+ * could not be satisfied to the failed_list.
*/
static void assign_requested_resources_sorted(struct list_head *head,
struct list_head *fail_head)
@@ -290,8 +284,9 @@ static void assign_requested_resources_sorted(struct list_head *head,
pci_assign_resource(dev_res->dev, idx)) {
if (fail_head) {
/*
- * if the failed res is for ROM BAR, and it will
- * be enabled later, don't add it to the list
+ * If the failed resource is a ROM BAR and
+ * it will be enabled later, don't add it
+ * to the list.
*/
if (!((idx == PCI_ROM_RESOURCE) &&
(!(res->flags & IORESOURCE_ROM_ENABLE))))
@@ -310,15 +305,14 @@ static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
struct pci_dev_resource *fail_res;
unsigned long mask = 0;
- /* check failed type */
+ /* Check failed type */
list_for_each_entry(fail_res, fail_head, list)
mask |= fail_res->flags;
/*
- * one pref failed resource will set IORESOURCE_MEM,
- * as we can allocate pref in non-pref range.
- * Will release all assigned non-pref sibling resources
- * according to that bit.
+ * A failed pref resource will set IORESOURCE_MEM, as we can
+ * allocate pref in a non-pref range. All assigned non-pref sibling
+ * resources will be released according to that bit.
*/
return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
}
@@ -328,11 +322,11 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
if (res->flags & IORESOURCE_IO)
return !!(mask & IORESOURCE_IO);
- /* check pref at first */
+ /* Check pref at first */
if (res->flags & IORESOURCE_PREFETCH) {
if (mask & IORESOURCE_PREFETCH)
return true;
- /* count pref if its parent is non-pref */
+ /* Count pref if its parent is non-pref */
else if ((mask & IORESOURCE_MEM) &&
!(res->parent->flags & IORESOURCE_PREFETCH))
return true;
@@ -343,33 +337,33 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
if (res->flags & IORESOURCE_MEM)
return !!(mask & IORESOURCE_MEM);
- return false; /* should not get here */
+ return false; /* Should not get here */
}
static void __assign_resources_sorted(struct list_head *head,
- struct list_head *realloc_head,
- struct list_head *fail_head)
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
{
/*
- * Should not assign requested resources at first.
- * they could be adjacent, so later reassign can not reallocate
- * them one by one in parent resource window.
- * Try to assign requested + add_size at beginning
- * if could do that, could get out early.
- * if could not do that, we still try to assign requested at first,
- * then try to reassign add_size for some resources.
+ * Should not assign requested resources at first. They could be
+ * adjacent, so a later reassign cannot reallocate them one by one in
+ * the parent resource window.
+ *
+ * Try to assign requested + add_size at the beginning. If that succeeds,
+ * we can get out early. If not, we still try to assign the requested
+ * size first, then reassign add_size for some resources.
*
* Separate three resource type checking if we need to release
* assigned resource after requested + add_size try.
- * 1. if there is io port assign fail, will release assigned
- * io port.
- * 2. if there is pref mmio assign fail, release assigned
- * pref mmio.
- * if assigned pref mmio's parent is non-pref mmio and there
- * is non-pref mmio assign fail, will release that assigned
- * pref mmio.
- * 3. if there is non-pref mmio assign fail or pref mmio
- * assigned fail, will release assigned non-pref mmio.
+ *
+ * 1. If IO port assignment fails, will release assigned IO
+ * port.
+ * 2. If pref MMIO assignment fails, release assigned pref
+ * MMIO. If assigned pref MMIO's parent is non-pref MMIO
+ * and non-pref MMIO assignment fails, will release that
+ * assigned pref MMIO.
+ * 3. If non-pref MMIO assignment fails or pref MMIO
+ * assignment fails, will release assigned non-pref MMIO.
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
@@ -398,7 +392,7 @@ static void __assign_resources_sorted(struct list_head *head,
/*
* There are two kinds of additional resources in the list:
* 1. bridge resource -- IORESOURCE_STARTALIGN
- * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
+ * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
* Here just fix the additional alignment for bridge
*/
if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
@@ -407,10 +401,10 @@ static void __assign_resources_sorted(struct list_head *head,
add_align = get_res_add_align(realloc_head, dev_res->res);
/*
- * The "head" list is sorted by the alignment to make sure
- * resources with bigger alignment will be assigned first.
- * After we change the alignment of a dev_res in "head" list,
- * we need to reorder the list by alignment to make it
+ * The "head" list is sorted by alignment so resources with
+ * bigger alignment will be assigned first. After we
+ * change the alignment of a dev_res in "head" list, we
+ * need to reorder the list by alignment to make it
* consistent.
*/
if (add_align > dev_res->res->start) {
@@ -435,7 +429,7 @@ static void __assign_resources_sorted(struct list_head *head,
/* Try updated head list with add_size added */
assign_requested_resources_sorted(head, &local_fail_head);
- /* all assigned with add_size ? */
+ /* All assigned with add_size? */
if (list_empty(&local_fail_head)) {
/* Remove head list from realloc_head list */
list_for_each_entry(dev_res, head, list)
@@ -445,13 +439,13 @@ static void __assign_resources_sorted(struct list_head *head,
return;
}
- /* check failed type */
+ /* Check failed type */
fail_type = pci_fail_res_type_mask(&local_fail_head);
- /* remove not need to be released assigned res from head list etc */
+ /* Remove assigned resources that need not be released from head list etc */
list_for_each_entry_safe(dev_res, tmp_res, head, list)
if (dev_res->res->parent &&
!pci_need_to_release(fail_type, dev_res->res)) {
- /* remove it from realloc_head list */
+ /* Remove it from realloc_head list */
remove_from_list(realloc_head, dev_res->res);
remove_from_list(&save_head, dev_res->res);
list_del(&dev_res->list);
@@ -477,16 +471,15 @@ requested_and_reassign:
/* Satisfy the must-have resource requests */
assign_requested_resources_sorted(head, fail_head);
- /* Try to satisfy any additional optional resource
- requests */
+ /* Try to satisfy any additional optional resource requests */
if (realloc_head)
reassign_resources_sorted(realloc_head, head);
free_list(head);
}
static void pdev_assign_resources_sorted(struct pci_dev *dev,
- struct list_head *add_head,
- struct list_head *fail_head)
+ struct list_head *add_head,
+ struct list_head *fail_head)
{
LIST_HEAD(head);
@@ -563,17 +556,19 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_setup_cardbus);
-/* Initialize bridges with base/limit values we have collected.
- PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
- requires that if there is no I/O ports or memory behind the
- bridge, corresponding range must be turned off by writing base
- value greater than limit to the bridge's base/limit registers.
-
- Note: care must be taken when updating I/O base/limit registers
- of bridges which support 32-bit I/O. This update requires two
- config space writes, so it's quite possible that an I/O window of
- the bridge will have some undesirable address (e.g. 0) after the
- first write. Ditto 64-bit prefetchable MMIO. */
+/*
+ * Initialize bridges with base/limit values we have collected. PCI-to-PCI
+ * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
+ * are no I/O ports or memory behind the bridge, the corresponding range
+ * must be turned off by writing base value greater than limit to the
+ * bridge's base/limit registers.
+ *
+ * Note: care must be taken when updating I/O base/limit registers of
+ * bridges which support 32-bit I/O. This update requires two config space
+ * writes, so it's quite possible that an I/O window of the bridge will
+ * have some undesirable address (e.g. 0) after the first write. Ditto
+ * 64-bit prefetchable MMIO.
+ */
static void pci_setup_bridge_io(struct pci_dev *bridge)
{
struct resource *res;
@@ -587,7 +582,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
if (bridge->io_window_1k)
io_mask = PCI_IO_1K_RANGE_MASK;
- /* Set up the top and bottom of the PCI I/O segment for this bus. */
+ /* Set up the top and bottom of the PCI I/O segment for this bus */
res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
@@ -595,19 +590,19 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
io_base_lo = (region.start >> 8) & io_mask;
io_limit_lo = (region.end >> 8) & io_mask;
l = ((u16) io_limit_lo << 8) | io_base_lo;
- /* Set up upper 16 bits of I/O base/limit. */
+ /* Set up upper 16 bits of I/O base/limit */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
pci_info(bridge, " bridge window %pR\n", res);
} else {
- /* Clear upper 16 bits of I/O base/limit. */
+ /* Clear upper 16 bits of I/O base/limit */
io_upper16 = 0;
l = 0x00f0;
}
- /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
+ /* Temporarily disable the I/O range before updating PCI_IO_BASE */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
- /* Update lower 16 bits of I/O base/limit. */
+ /* Update lower 16 bits of I/O base/limit */
pci_write_config_word(bridge, PCI_IO_BASE, l);
- /* Update upper 16 bits of I/O base/limit. */
+ /* Update upper 16 bits of I/O base/limit */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
@@ -617,7 +612,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
struct pci_bus_region region;
u32 l;
- /* Set up the top and bottom of the PCI Memory segment for this bus. */
+ /* Set up the top and bottom of the PCI Memory segment for this bus */
res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
@@ -636,12 +631,14 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
struct pci_bus_region region;
u32 l, bu, lu;
- /* Clear out the upper 32 bits of PREF limit.
- If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
- disables PREF range, which is ok. */
+ /*
+ * Clear out the upper 32 bits of PREF limit. If
+ * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
+ * PREF range, which is ok.
+ */
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
- /* Set up PREF base/limit. */
+ /* Set up PREF base/limit */
bu = lu = 0;
res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pcibios_resource_to_bus(bridge->bus, &region, res);
@@ -658,7 +655,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
+ /* Set the upper 32 bits of PREF base & limit */
pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
@@ -702,13 +699,13 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
return 0;
if (pci_claim_resource(bridge, i) == 0)
- return 0; /* claimed the window */
+ return 0; /* Claimed the window */
if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return 0;
if (!pci_bus_clip_resource(bridge, i))
- return -EINVAL; /* clipping didn't change anything */
+ return -EINVAL; /* Clipping didn't change anything */
switch (i - PCI_BRIDGE_RESOURCES) {
case 0:
@@ -725,14 +722,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
}
if (pci_claim_resource(bridge, i) == 0)
- return 0; /* claimed a smaller window */
+ return 0; /* Claimed a smaller window */
return -EINVAL;
}
-/* Check whether the bridge supports optional I/O and
- prefetchable memory ranges. If not, the respective
- base/limit registers must be read-only and read as 0. */
+/*
+ * Check whether the bridge supports optional I/O and prefetchable memory
+ * ranges. If not, the respective base/limit registers must be read-only
+ * and read as 0.
+ */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
@@ -752,12 +751,14 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
}
-/* Helper function for sizing routines: find first available
- bus resource of a given type. Note: we intentionally skip
- the bus resources which have already been assigned (that is,
- have non-NULL parent resource). */
+/*
+ * Helper function for sizing routines: find first available bus resource
+ * of a given type. Note: we intentionally skip the bus resources which
+ * have already been assigned (that is, have non-NULL parent resource).
+ */
static struct resource *find_free_bus_resource(struct pci_bus *bus,
- unsigned long type_mask, unsigned long type)
+ unsigned long type_mask,
+ unsigned long type)
{
int i;
struct resource *r;
@@ -772,19 +773,21 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus,
}
static resource_size_t calculate_iosize(resource_size_t size,
- resource_size_t min_size,
- resource_size_t size1,
- resource_size_t add_size,
- resource_size_t children_add_size,
- resource_size_t old_size,
- resource_size_t align)
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
+ resource_size_t old_size,
+ resource_size_t align)
{
if (size < min_size)
size = min_size;
if (old_size == 1)
old_size = 0;
- /* To be fixed in 2.5: we should have sort of HAVE_ISA
- flag in the struct pci_bus. */
+ /*
+ * To be fixed in 2.5: we should have sort of HAVE_ISA flag in the
+ * struct pci_bus.
+ */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
@@ -797,11 +800,11 @@ static resource_size_t calculate_iosize(resource_size_t size,
}
static resource_size_t calculate_memsize(resource_size_t size,
- resource_size_t min_size,
- resource_size_t add_size,
- resource_size_t children_add_size,
- resource_size_t old_size,
- resource_size_t align)
+ resource_size_t min_size,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
+ resource_size_t old_size,
+ resource_size_t align)
{
if (size < min_size)
size = min_size;
@@ -824,8 +827,7 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
-static resource_size_t window_alignment(struct pci_bus *bus,
- unsigned long type)
+static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
{
resource_size_t align = 1, arch_align;
@@ -833,8 +835,8 @@ static resource_size_t window_alignment(struct pci_bus *bus,
align = PCI_P2P_DEFAULT_MEM_ALIGN;
else if (type & IORESOURCE_IO) {
/*
- * Per spec, I/O windows are 4K-aligned, but some
- * bridges have an extension to support 1K alignment.
+ * Per spec, I/O windows are 4K-aligned, but some bridges have
+ * an extension to support 1K alignment.
*/
if (bus->self->io_window_1k)
align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
@@ -847,20 +849,21 @@ static resource_size_t window_alignment(struct pci_bus *bus,
}
/**
- * pbus_size_io() - size the io window of a given bus
+ * pbus_size_io() - Size the I/O window of a given bus
*
- * @bus : the bus
- * @min_size : the minimum io window that must to be allocated
- * @add_size : additional optional io window
- * @realloc_head : track the additional io window on this list
+ * @bus: The bus
+ * @min_size: The minimum I/O window that must be allocated
+ * @add_size: Additional optional I/O window
+ * @realloc_head: Track the additional I/O window on this list
*
- * Sizing the IO windows of the PCI-PCI bridge is trivial,
- * since these windows have 1K or 4K granularity and the IO ranges
- * of non-bridge PCI devices are limited to 256 bytes.
- * We must be careful with the ISA aliasing though.
+ * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
+ * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
+ * devices are limited to 256 bytes. We must be careful with the ISA
+ * aliasing though.
*/
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
- resource_size_t add_size, struct list_head *realloc_head)
+ resource_size_t add_size,
+ struct list_head *realloc_head)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
@@ -918,9 +921,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
- pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx\n",
- b_res, &bus->busn_res,
- (unsigned long long)size1-size0);
+ pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long) size1 - size0);
}
}
@@ -947,33 +950,33 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
}
/**
- * pbus_size_mem() - size the memory window of a given bus
+ * pbus_size_mem() - Size the memory window of a given bus
*
- * @bus : the bus
- * @mask: mask the resource flag, then compare it with type
- * @type: the type of free resource from bridge
- * @type2: second match type
- * @type3: third match type
- * @min_size : the minimum memory window that must to be allocated
- * @add_size : additional optional memory window
- * @realloc_head : track the additional memory window on this list
+ * @bus: The bus
+ * @mask: Mask the resource flag, then compare it with type
+ * @type: The type of free resource from bridge
+ * @type2: Second match type
+ * @type3: Third match type
+ * @min_size: The minimum memory window that must be allocated
+ * @add_size: Additional optional memory window
+ * @realloc_head: Track the additional memory window on this list
*
- * Calculate the size of the bus and minimal alignment which
- * guarantees that all child resources fit in this size.
+ * Calculate the size of the bus and minimal alignment which guarantees
+ * that all child resources fit in this size.
*
- * Returns -ENOSPC if there's no available bus resource of the desired type.
- * Otherwise, sets the bus resource start/end to indicate the required
- * size, adds things to realloc_head (if supplied), and returns 0.
+ * Return -ENOSPC if there's no available bus resource of the desired
+ * type. Otherwise, set the bus resource start/end to indicate the
+ * required size, add things to realloc_head (if supplied), and return 0.
*/
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
unsigned long type, unsigned long type2,
- unsigned long type3,
- resource_size_t min_size, resource_size_t add_size,
+ unsigned long type3, resource_size_t min_size,
+ resource_size_t add_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
resource_size_t min_align, align, size, size0, size1;
- resource_size_t aligns[18]; /* Alignments from 1Mb to 128Gb */
+ resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus,
mask | IORESOURCE_PREFETCH, type);
@@ -1002,12 +1005,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
- /* put SRIOV requested res to the optional list */
+ /* Put SRIOV requested res to the optional list */
if (realloc_head && i >= PCI_IOV_RESOURCES &&
i <= PCI_IOV_RESOURCE_END) {
add_align = max(pci_resource_alignment(dev, r), add_align);
r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
+ add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
children_add_size += r_size;
continue;
}
@@ -1029,8 +1032,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
continue;
}
size += max(r_size, align);
- /* Exclude ranges with size > align from
- calculation of the alignment. */
+ /*
+ * Exclude ranges with size > align from calculation of
+ * the alignment.
+ */
if (r_size <= align)
aligns[order] += align;
if (order > max_order)
@@ -1063,7 +1068,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->flags |= IORESOURCE_STARTALIGN;
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
- pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
+ pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
(unsigned long long) (size1 - size0),
(unsigned long long) add_align);
@@ -1081,7 +1086,7 @@ unsigned long pci_cardbus_resource_alignment(struct resource *res)
}
static void pci_bus_size_cardbus(struct pci_bus *bus,
- struct list_head *realloc_head)
+ struct list_head *realloc_head)
{
struct pci_dev *bridge = bus->self;
struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -1091,8 +1096,8 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
if (b_res[0].parent)
goto handle_b_res_1;
/*
- * Reserve some resources for CardBus. We reserve
- * a fixed amount of bus space for CardBus bridges.
+ * Reserve some resources for CardBus. We reserve a fixed amount
+ * of bus space for CardBus bridges.
*/
b_res[0].start = pci_cardbus_io_size;
b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
@@ -1116,7 +1121,7 @@ handle_b_res_1:
}
handle_b_res_2:
- /* MEM1 must not be pref mmio */
+ /* MEM1 must not be pref MMIO */
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
@@ -1124,10 +1129,7 @@ handle_b_res_2:
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
}
- /*
- * Check whether prefetchable memory is supported
- * by this bridge.
- */
+ /* Check whether prefetchable memory is supported by this bridge. */
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
@@ -1138,9 +1140,8 @@ handle_b_res_2:
if (b_res[2].parent)
goto handle_b_res_3;
/*
- * If we have prefetchable memory support, allocate
- * two regions. Otherwise, allocate one region of
- * twice the size.
+ * If we have prefetchable memory support, allocate two regions.
+ * Otherwise, allocate one region of twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
b_res[2].start = pci_cardbus_mem_size;
@@ -1153,7 +1154,7 @@ handle_b_res_2:
pci_cardbus_mem_size, pci_cardbus_mem_size);
}
- /* reduce that to half */
+ /* Reduce that to half */
b_res_3_size = pci_cardbus_mem_size;
}
@@ -1204,7 +1205,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
switch (bus->self->hdr_type) {
case PCI_HEADER_TYPE_CARDBUS:
- /* don't size cardbuses yet. */
+ /* Don't size CardBuses yet */
break;
case PCI_HEADER_TYPE_BRIDGE:
@@ -1271,18 +1272,17 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
/*
* Compute the size required to put everything else in the
- * non-prefetchable window. This includes:
+ * non-prefetchable window. This includes:
*
* - all non-prefetchable resources
* - 32-bit prefetchable resources if there's a 64-bit
* prefetchable window or no prefetchable window at all
- * - 64-bit prefetchable resources if there's no
- * prefetchable window at all
+ * - 64-bit prefetchable resources if there's no prefetchable
+ * window at all
*
- * Note that the strategy in __pci_assign_resource() must
- * match that used here. Specifically, we cannot put a
- * 32-bit prefetchable resource in a 64-bit prefetchable
- * window.
+ * Note that the strategy in __pci_assign_resource() must match
+ * that used here. Specifically, we cannot put a 32-bit
+ * prefetchable resource in a 64-bit prefetchable window.
*/
pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
realloc_head ? 0 : additional_mem_size,
@@ -1315,8 +1315,8 @@ static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
}
/*
- * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they
- * are skipped by pbus_assign_resources_sorted().
+ * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
+ * skipped by pbus_assign_resources_sorted().
*/
static void pdev_assign_fixed_resources(struct pci_dev *dev)
{
@@ -1427,10 +1427,9 @@ static void pci_bus_allocate_resources(struct pci_bus *b)
struct pci_bus *child;
/*
- * Carry out a depth-first search on the PCI bus
- * tree to allocate bridge apertures. Read the
- * programmed bridge bases and recursively claim
- * the respective bridge resources.
+ * Carry out a depth-first search on the PCI bus tree to allocate
+ * bridge apertures. Read the programmed bridge bases and
+ * recursively claim the respective bridge resources.
*/
if (b->self) {
pci_read_bridge_bases(b);
@@ -1484,7 +1483,7 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
IORESOURCE_MEM_64)
static void pci_bridge_release_resources(struct pci_bus *bus,
- unsigned long type)
+ unsigned long type)
{
struct pci_dev *dev = bus->self;
struct resource *r;
@@ -1495,16 +1494,14 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
/*
- * 1. if there is io port assign fail, will release bridge
- * io port.
- * 2. if there is non pref mmio assign fail, release bridge
- * nonpref mmio.
- * 3. if there is 64bit pref mmio assign fail, and bridge pref
- * is 64bit, release bridge pref mmio.
- * 4. if there is pref mmio assign fail, and bridge pref is
- * 32bit mmio, release bridge pref mmio
- * 5. if there is pref mmio assign fail, and bridge pref is not
- * assigned, release bridge nonpref mmio.
+ * 1. If IO port assignment fails, release bridge IO port.
+ * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO.
+ * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit,
+ * release bridge pref MMIO.
+ * 4. If pref MMIO assignment fails, and bridge pref is 32bit,
+ * release bridge pref MMIO.
+ * 5. If pref MMIO assignment fails, and bridge pref is not
+ * assigned, release bridge nonpref MMIO.
*/
if (type & IORESOURCE_IO)
idx = 0;
@@ -1524,25 +1521,22 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
if (!r->parent)
return;
- /*
- * if there are children under that, we should release them
- * all
- */
+ /* If there are children, release them all */
release_child_resources(r);
if (!release_resource(r)) {
type = old_flags = r->flags & PCI_RES_TYPE_MASK;
- pci_printk(KERN_DEBUG, dev, "resource %d %pR released\n",
- PCI_BRIDGE_RESOURCES + idx, r);
- /* keep the old size */
+ pci_info(dev, "resource %d %pR released\n",
+ PCI_BRIDGE_RESOURCES + idx, r);
+ /* Keep the old size */
r->end = resource_size(r) - 1;
r->start = 0;
r->flags = 0;
- /* avoiding touch the one without PREF */
+ /* Avoid touching the one without PREF */
if (type & IORESOURCE_PREFETCH)
type = IORESOURCE_PREFETCH;
__pci_setup_bridge(bus, type);
- /* for next child res under same bridge */
+ /* For next child res under same bridge */
r->flags = old_flags;
}
}
@@ -1551,9 +1545,10 @@ enum release_type {
leaf_only,
whole_subtree,
};
+
/*
- * try to release pci bridge resources that is from leaf bridge,
- * so we can allocate big new one later
+ * Try to release PCI bridge resources from leaf bridge, so we can allocate
+ * a larger window later.
*/
static void pci_bus_release_bridge_resources(struct pci_bus *bus,
unsigned long type,
@@ -1596,7 +1591,7 @@ static void pci_bus_dump_res(struct pci_bus *bus)
if (!res || !res->end || !res->flags)
continue;
- dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
+ dev_info(&bus->dev, "resource %d %pR\n", i, res);
}
}
@@ -1678,7 +1673,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
pcibios_resource_to_bus(dev->bus, &region, r);
if (!region.start) {
*unassigned = true;
- return 1; /* return early from pci_walk_bus() */
+ return 1; /* Return early from pci_walk_bus() */
}
}
@@ -1686,7 +1681,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
}
static enum enable_type pci_realloc_detect(struct pci_bus *bus,
- enum enable_type enable_local)
+ enum enable_type enable_local)
{
bool unassigned = false;
@@ -1701,21 +1696,21 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
}
#else
static enum enable_type pci_realloc_detect(struct pci_bus *bus,
- enum enable_type enable_local)
+ enum enable_type enable_local)
{
return enable_local;
}
#endif
/*
- * first try will not touch pci bridge res
- * second and later try will clear small leaf bridge res
- * will stop till to the max depth if can not find good one
+ * The first try will not touch PCI bridge resources.
+ * The second and later tries will clear small leaf bridge resources.
+ * We stop at the max depth if no good assignment can be found.
*/
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
- LIST_HEAD(realloc_head); /* list of resources that
- want additional resources */
+ LIST_HEAD(realloc_head);
+ /* List of resources that want additional resources */
struct list_head *add_list = NULL;
int tried_times = 0;
enum release_type rel_type = leaf_only;
@@ -1724,26 +1719,26 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
int pci_try_num = 1;
enum enable_type enable_local;
- /* don't realloc if asked to do so */
+ /* Don't realloc if asked to do so */
enable_local = pci_realloc_detect(bus, pci_realloc_enable);
if (pci_realloc_enabled(enable_local)) {
int max_depth = pci_bus_get_depth(bus);
pci_try_num = max_depth + 1;
- dev_printk(KERN_DEBUG, &bus->dev,
- "max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
+ dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
}
again:
/*
- * last try will use add_list, otherwise will try good to have as
- * must have, so can realloc parent bridge resource
+ * The last try will use add_list; otherwise, treat good-to-have as
+ * must-have so the parent bridge resource can be reallocated
*/
if (tried_times + 1 == pci_try_num)
add_list = &realloc_head;
- /* Depth first, calculate sizes and alignments of all
- subordinate buses. */
+ /*
+ * Depth first, calculate sizes and alignments of all subordinate buses.
+ */
__pci_bus_size_bridges(bus, add_list);
/* Depth last, allocate resources and update the hardware. */
@@ -1752,7 +1747,7 @@ again:
BUG_ON(!list_empty(add_list));
tried_times++;
- /* any device complain? */
+ /* Did any device complain? */
if (list_empty(&fail_head))
goto dump;
@@ -1766,23 +1761,23 @@ again:
goto dump;
}
- dev_printk(KERN_DEBUG, &bus->dev,
- "No. %d try to assign unassigned res\n", tried_times + 1);
+ dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
+ tried_times + 1);
- /* third times and later will not check if it is leaf */
+ /* Third time and later will not check if it is a leaf */
if ((tried_times + 1) > 2)
rel_type = whole_subtree;
/*
* Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge
+ * child device under that bridge.
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
fail_res->flags & PCI_RES_TYPE_MASK,
rel_type);
- /* restore size and flags */
+ /* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -1797,7 +1792,7 @@ again:
goto again;
dump:
- /* dump the resource on buses */
+ /* Dump the resource on buses */
pci_bus_dump_resources(bus);
}
@@ -1808,14 +1803,15 @@ void __init pci_assign_unassigned_resources(void)
list_for_each_entry(root_bus, &pci_root_buses, node) {
pci_assign_unassigned_root_bus_resources(root_bus);
- /* Make sure the root bridge has a companion ACPI device: */
+ /* Make sure the root bridge has a companion ACPI device */
if (ACPI_HANDLE(root_bus->bridge))
acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
}
}
static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
- struct list_head *add_list, resource_size_t available)
+ struct list_head *add_list,
+ resource_size_t available)
{
struct pci_dev_resource *dev_res;
@@ -1839,8 +1835,10 @@ static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
}
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
- struct list_head *add_list, resource_size_t available_io,
- resource_size_t available_mmio, resource_size_t available_mmio_pref)
+ struct list_head *add_list,
+ resource_size_t available_io,
+ resource_size_t available_mmio,
+ resource_size_t available_mmio_pref)
{
resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
unsigned int normal_bridges = 0, hotplug_bridges = 0;
@@ -1864,7 +1862,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
/*
* Calculate the total amount of extra resource space we can
- * pass to bridges below this one. This is basically the
+ * pass to bridges below this one. This is basically the
* extra space reduced by the minimal required space for the
* non-hotplug bridges.
*/
@@ -1874,7 +1872,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
/*
* Calculate how many hotplug bridges and normal bridges there
- * are on this bus. We will distribute the additional available
+ * are on this bus. We will distribute the additional available
* resources between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
@@ -1909,8 +1907,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
/*
* There is only one bridge on the bus so it gets all available
- * resources which it can then distribute to the possible
- * hotplug bridges below.
+ * resources which it can then distribute to the possible hotplug
+ * bridges below.
*/
if (hotplug_bridges + normal_bridges == 1) {
dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
@@ -1961,9 +1959,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
}
-static void
-pci_bridge_distribute_available_resources(struct pci_dev *bridge,
- struct list_head *add_list)
+static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ struct list_head *add_list)
{
resource_size_t available_io, available_mmio, available_mmio_pref;
const struct resource *res;
@@ -1980,14 +1977,17 @@ pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref = resource_size(res);
pci_bus_distribute_available_resources(bridge->subordinate,
- add_list, available_io, available_mmio, available_mmio_pref);
+ add_list, available_io,
+ available_mmio,
+ available_mmio_pref);
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
- LIST_HEAD(add_list); /* list of resources that
- want additional resources */
+ /* List of resources that want additional resources */
+ LIST_HEAD(add_list);
+
int tried_times = 0;
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
@@ -1997,9 +1997,9 @@ again:
__pci_bus_size_bridges(parent, &add_list);
/*
- * Distribute remaining resources (if any) equally between
- * hotplug bridges below. This makes it possible to extend the
- * hierarchy later without running out of resources.
+ * Distribute remaining resources (if any) equally between hotplug
+ * bridges below. This makes it possible to extend the hierarchy
+ * later without running out of resources.
*/
pci_bridge_distribute_available_resources(bridge, &add_list);
@@ -2011,7 +2011,7 @@ again:
goto enable_all;
if (tried_times >= 2) {
- /* still fail, don't need to try more */
+ /* Still fail, don't need to try more */
free_list(&fail_head);
goto enable_all;
}
@@ -2020,15 +2020,15 @@ again:
tried_times + 1);
/*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge
+ * Try to release leaf bridge's resources that aren't big enough
+ * to contain child device resources.
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
fail_res->flags & PCI_RES_TYPE_MASK,
whole_subtree);
- /* restore size and flags */
+ /* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -2107,7 +2107,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
}
list_for_each_entry(dev_res, &saved, list) {
- /* Skip the bridge we just assigned resources for. */
+ /* Skip the bridge we just assigned resources for */
if (bridge == dev_res->dev)
continue;
@@ -2119,7 +2119,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
return 0;
cleanup:
- /* restore size and flags */
+ /* Restore size and flags */
list_for_each_entry(dev_res, &failed, list) {
struct resource *res = dev_res->res;
@@ -2151,8 +2151,8 @@ cleanup:
void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
{
struct pci_dev *dev;
- LIST_HEAD(add_list); /* list of resources that
- want additional resources */
+ /* List of resources that want additional resources */
+ LIST_HEAD(add_list);
down_read(&pci_bus_sem);
for_each_pci_bridge(dev, bus)
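As an illustrative, userspace-only sketch of the ISA-aliasing adjustment that calculate_iosize() keeps under CONFIG_ISA/CONFIG_EISA (not part of the patch above): with ISA aliasing only the low 256 bytes of every 1K block are usable for PCI I/O, so each full 256-byte chunk of requested port space costs 1K of bridge window.

#include <stdio.h>

/* Same expression calculate_iosize() applies under CONFIG_ISA/CONFIG_EISA */
static unsigned long isa_aliased_iosize(unsigned long size)
{
	return (size & 0xff) + ((size & ~0xffUL) << 2);
}

int main(void)
{
	/* 0x180 bytes of requested I/O ports grow to a 0x480-byte window */
	printf("0x%lx -> 0x%lx\n", 0x180UL, isa_aliased_iosize(0x180UL));
	return 0;
}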
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index c46d5e1ff536..f4d92b1afe7b 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -403,7 +403,7 @@ static int pci_slot_init(void)
pci_slots_kset = kset_create_and_add("slots", NULL,
&pci_bus_kset->kobj);
if (!pci_slots_kset) {
- printk(KERN_ERR "PCI: Slot initialization failure\n");
+ pr_err("PCI: Slot initialization failure\n");
return -ENOMEM;
}
return 0;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 0f7b80144863..bebbde4ebec0 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -658,19 +658,25 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
static int ioctl_event_summary(struct switchtec_dev *stdev,
struct switchtec_user *stuser,
- struct switchtec_ioctl_event_summary __user *usum)
+ struct switchtec_ioctl_event_summary __user *usum,
+ size_t size)
{
- struct switchtec_ioctl_event_summary s = {0};
+ struct switchtec_ioctl_event_summary *s;
int i;
u32 reg;
+ int ret = 0;
- s.global = ioread32(&stdev->mmio_sw_event->global_summary);
- s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
- s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->global = ioread32(&stdev->mmio_sw_event->global_summary);
+ s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
- s.part[i] = reg;
+ s->part[i] = reg;
}
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
@@ -679,15 +685,19 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
break;
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
- s.pff[i] = reg;
+ s->pff[i] = reg;
}
- if (copy_to_user(usum, &s, sizeof(s)))
- return -EFAULT;
+ if (copy_to_user(usum, s, size)) {
+ ret = -EFAULT;
+ goto error_case;
+ }
stuser->event_cnt = atomic_read(&stdev->event_cnt);
- return 0;
+error_case:
+ kfree(s);
+ return ret;
}
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
@@ -977,8 +987,9 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
case SWITCHTEC_IOCTL_FLASH_PART_INFO:
rc = ioctl_flash_part_info(stdev, argp);
break;
- case SWITCHTEC_IOCTL_EVENT_SUMMARY:
- rc = ioctl_event_summary(stdev, stuser, argp);
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
+ rc = ioctl_event_summary(stdev, stuser, argp,
+ sizeof(struct switchtec_ioctl_event_summary_legacy));
break;
case SWITCHTEC_IOCTL_EVENT_CTL:
rc = ioctl_event_ctl(stdev, argp);
@@ -989,6 +1000,10 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
case SWITCHTEC_IOCTL_PORT_TO_PFF:
rc = ioctl_port_to_pff(stdev, argp);
break;
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+ rc = ioctl_event_summary(stdev, stuser, argp,
+ sizeof(struct switchtec_ioctl_event_summary));
+ break;
default:
rc = -ENOTTY;
break;
@@ -1162,7 +1177,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
- if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+ if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
+ eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
return 0;
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
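The ioctl_event_summary() rework above keeps a single handler for the legacy and current event-summary ioctls by passing the caller's structure size down and bounding the copy to userspace with it. A minimal sketch of that pattern, using hypothetical structure names rather than the driver's real layouts:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the legacy and extended summary layouts */
struct summary_legacy { unsigned int global; unsigned int part[48]; };
struct summary { unsigned int global; unsigned int part[48]; unsigned int pff[255]; };

/* One fill routine serves both ioctls; 'size' bounds what the caller gets */
static int event_summary(void *ubuf, size_t size)
{
	struct summary s = { .global = 1 };

	if (size > sizeof(s))
		return -1;
	memcpy(ubuf, &s, size); /* copy_to_user(usum, s, size) in the driver */
	return 0;
}

int main(void)
{
	struct summary_legacy old_abi;
	struct summary new_abi;

	event_summary(&old_abi, sizeof(old_abi)); /* legacy ioctl: short copy */
	event_summary(&new_abi, sizeof(new_abi)); /* current ioctl: full copy */
	printf("legacy=%zu current=%zu\n", sizeof(old_abi), sizeof(new_abi));
	return 0;
}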
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index eba6e33147a2..d1b16cf3403f 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -291,8 +291,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
vector[i] = op.msix_entries[i].vector;
}
} else {
- printk(KERN_DEBUG "enable msix get value %x\n",
- op.value);
+ pr_info("enable msix get value %x\n", op.value);
err = op.value;
}
} else {
@@ -364,12 +363,12 @@ static void pci_frontend_disable_msi(struct pci_dev *dev)
err = do_pci_op(pdev, &op);
if (err == XEN_PCI_ERR_dev_not_found) {
/* XXX No response from backend, what shall we do? */
- printk(KERN_DEBUG "get no response from backend for disable MSI\n");
+ pr_info("get no response from backend for disable MSI\n");
return;
}
if (err)
/* how can pciback notify us fail? */
- printk(KERN_DEBUG "get fake response frombackend\n");
+ pr_info("get fake response from backend\n");
}
static struct xen_pci_frontend_ops pci_frontend_ops = {
@@ -1104,7 +1103,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
case XenbusStateClosed:
if (xdev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index c2a17a79f0b2..267fb875e40f 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -22,7 +22,7 @@
#include <mach/hardware.h>
#include <asm/io.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/mux.h>
#include <mach/tc.h>
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 19d8af9a36a2..ea798548b012 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -273,6 +273,20 @@ config PINCTRL_ST
select PINCONF
select GPIOLIB_IRQCHIP
+config PINCTRL_STMFX
+ tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select MFD_STMFX
+ help
+ Driver for STMicroelectronics Multi-Function eXpander (STMFX)
+ GPIO expander.
+ This provides a GPIO interface supporting inputs and outputs,
+ push-pull or open-drain configuration, and it can also be used
+ as an interrupt controller.
+
config PINCTRL_U300
bool "U300 pin controller driver"
depends on ARCH_U300
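Because the STMFX expander described above sits behind I2C, its GPIOs are can_sleep lines, so a consumer driver would use the cansleep accessors of the gpiod API. A minimal, hypothetical consumer sketch (the "reset" con_id and the compatible string are placeholders, not something this patch defines):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* "reset" is a hypothetical con_id routed to an STMFX pin in DT */
	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* The expander is accessed over I2C, so use the cansleep variant */
	gpiod_set_value_cansleep(reset, 1);

	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "acme,stmfx-gpio-demo" }, /* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "stmfx-gpio-demo",
		.of_match_table = demo_of_match,
	},
	.probe = demo_probe,
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");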
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 62df40647e02..ac537fdbc998 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
+obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index fd9d6f026d70..f0cdb5234e49 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -655,115 +655,6 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/seq_file.h>
-
-/*
- * This compares the chip's registers with the register
- * cache and corrects any incorrectly set register. This
- * can be used to fix state for MCP23xxx, that temporary
- * lost its power supply.
- */
-#define MCP23S08_CONFIG_REGS 7
-static int __check_mcp23s08_reg_cache(struct mcp23s08 *mcp)
-{
- int cached[MCP23S08_CONFIG_REGS];
- int err = 0, i;
-
- /* read cached config registers */
- for (i = 0; i < MCP23S08_CONFIG_REGS; i++) {
- err = mcp_read(mcp, i, &cached[i]);
- if (err)
- goto out;
- }
-
- regcache_cache_bypass(mcp->regmap, true);
-
- for (i = 0; i < MCP23S08_CONFIG_REGS; i++) {
- int uncached;
- err = mcp_read(mcp, i, &uncached);
- if (err)
- goto out;
-
- if (uncached != cached[i]) {
- dev_err(mcp->dev, "restoring reg 0x%02x from 0x%04x to 0x%04x (power-loss?)\n",
- i, uncached, cached[i]);
- mcp_write(mcp, i, cached[i]);
- }
- }
-
-out:
- if (err)
- dev_err(mcp->dev, "read error: reg=%02x, err=%d", i, err);
- regcache_cache_bypass(mcp->regmap, false);
- return err;
-}
-
-/*
- * This shows more info than the generic gpio dump code:
- * pullups, deglitching, open drain drive.
- */
-static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
-{
- struct mcp23s08 *mcp;
- char bank;
- int t;
- unsigned mask;
- int iodir, gpio, gppu;
-
- mcp = gpiochip_get_data(chip);
-
- /* NOTE: we only handle one bank for now ... */
- bank = '0' + ((mcp->addr >> 1) & 0x7);
-
- mutex_lock(&mcp->lock);
-
- t = __check_mcp23s08_reg_cache(mcp);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
- t = mcp_read(mcp, MCP_IODIR, &iodir);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
- t = mcp_read(mcp, MCP_GPIO, &gpio);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
- t = mcp_read(mcp, MCP_GPPU, &gppu);
- if (t) {
- seq_printf(s, " I/O Error\n");
- goto done;
- }
-
- for (t = 0, mask = BIT(0); t < chip->ngpio; t++, mask <<= 1) {
- const char *label;
-
- label = gpiochip_is_requested(chip, t);
- if (!label)
- continue;
-
- seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s\n",
- chip->base + t, bank, t, label,
- (iodir & mask) ? "in " : "out",
- (gpio & mask) ? "hi" : "lo",
- (gppu & mask) ? "up" : " ");
- /* NOTE: ignoring the irq-related registers */
- }
-done:
- mutex_unlock(&mcp->lock);
-}
-
-#else
-#define mcp23s08_dbg_show NULL
-#endif
-
-/*----------------------------------------------------------------------*/
-
static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
void *data, unsigned addr, unsigned type,
unsigned int base, int cs)
@@ -784,7 +675,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.get = mcp23s08_get;
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
- mcp->chip.dbg_show = mcp23s08_dbg_show;
#ifdef CONFIG_OF_GPIO
mcp->chip.of_gpio_n_cells = 2;
mcp->chip.of_node = dev->of_node;
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
new file mode 100644
index 000000000000..eba872ce4a7c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STMicroelectronics Multi-Function eXpander (STMFX) GPIO expander
+ *
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/stmfx.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+/* GPIOs expander */
+/* GPIO_STATE1 0x10, GPIO_STATE2 0x11, GPIO_STATE3 0x12 */
+#define STMFX_REG_GPIO_STATE STMFX_REG_GPIO_STATE1 /* R */
+/* GPIO_DIR1 0x60, GPIO_DIR2 0x61, GPIO_DIR3 0x63 */
+#define STMFX_REG_GPIO_DIR STMFX_REG_GPIO_DIR1 /* RW */
+/* GPIO_TYPE1 0x64, GPIO_TYPE2 0x65, GPIO_TYPE3 0x66 */
+#define STMFX_REG_GPIO_TYPE STMFX_REG_GPIO_TYPE1 /* RW */
+/* GPIO_PUPD1 0x68, GPIO_PUPD2 0x69, GPIO_PUPD3 0x6A */
+#define STMFX_REG_GPIO_PUPD STMFX_REG_GPIO_PUPD1 /* RW */
+/* GPO_SET1 0x6C, GPO_SET2 0x6D, GPO_SET3 0x6E */
+#define STMFX_REG_GPO_SET STMFX_REG_GPO_SET1 /* RW */
+/* GPO_CLR1 0x70, GPO_CLR2 0x71, GPO_CLR3 0x72 */
+#define STMFX_REG_GPO_CLR STMFX_REG_GPO_CLR1 /* RW */
+/* IRQ_GPI_SRC1 0x48, IRQ_GPI_SRC2 0x49, IRQ_GPI_SRC3 0x4A */
+#define STMFX_REG_IRQ_GPI_SRC STMFX_REG_IRQ_GPI_SRC1 /* RW */
+/* IRQ_GPI_EVT1 0x4C, IRQ_GPI_EVT2 0x4D, IRQ_GPI_EVT3 0x4E */
+#define STMFX_REG_IRQ_GPI_EVT STMFX_REG_IRQ_GPI_EVT1 /* RW */
+/* IRQ_GPI_TYPE1 0x50, IRQ_GPI_TYPE2 0x51, IRQ_GPI_TYPE3 0x52 */
+#define STMFX_REG_IRQ_GPI_TYPE STMFX_REG_IRQ_GPI_TYPE1 /* RW */
+/* IRQ_GPI_PENDING1 0x0C, IRQ_GPI_PENDING2 0x0D, IRQ_GPI_PENDING3 0x0E */
+#define STMFX_REG_IRQ_GPI_PENDING STMFX_REG_IRQ_GPI_PENDING1 /* R */
+/* IRQ_GPI_ACK1 0x54, IRQ_GPI_ACK2 0x55, IRQ_GPI_ACK3 0x56 */
+#define STMFX_REG_IRQ_GPI_ACK STMFX_REG_IRQ_GPI_ACK1 /* RW */
+
+#define NR_GPIO_REGS 3
+#define NR_GPIOS_PER_REG 8
+#define get_reg(offset) ((offset) / NR_GPIOS_PER_REG)
+#define get_shift(offset) ((offset) % NR_GPIOS_PER_REG)
+#define get_mask(offset) (BIT(get_shift(offset)))
+
+/*
+ * STMFX pinctrl can have up to 24 pins if the other STMFX functions are not used.
+ * Pin availability is managed via the gpio-ranges property.
+ */
+static const struct pinctrl_pin_desc stmfx_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "agpio0"),
+ PINCTRL_PIN(17, "agpio1"),
+ PINCTRL_PIN(18, "agpio2"),
+ PINCTRL_PIN(19, "agpio3"),
+ PINCTRL_PIN(20, "agpio4"),
+ PINCTRL_PIN(21, "agpio5"),
+ PINCTRL_PIN(22, "agpio6"),
+ PINCTRL_PIN(23, "agpio7"),
+};
+
+struct stmfx_pinctrl {
+ struct device *dev;
+ struct stmfx *stmfx;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+ struct mutex lock; /* IRQ bus lock */
+ unsigned long gpio_valid_mask;
+ /* Cache of IRQ_GPI_* registers for bus_lock */
+ u8 irq_gpi_src[NR_GPIO_REGS];
+ u8 irq_gpi_type[NR_GPIO_REGS];
+ u8 irq_gpi_evt[NR_GPIO_REGS];
+ u8 irq_toggle_edge[NR_GPIO_REGS];
+#ifdef CONFIG_PM
+ /* Backup of GPIO_* registers for suspend/resume */
+ u8 bkp_gpio_state[NR_GPIO_REGS];
+ u8 bkp_gpio_dir[NR_GPIO_REGS];
+ u8 bkp_gpio_type[NR_GPIO_REGS];
+ u8 bkp_gpio_pupd[NR_GPIO_REGS];
+#endif
+};
+
+static int stmfx_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_STATE + get_reg(offset);
+ u32 mask = get_mask(offset);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &value);
+
+ return ret ? ret : !!(value & mask);
+}
+
+static void stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = value ? STMFX_REG_GPO_SET : STMFX_REG_GPO_CLR;
+ u32 mask = get_mask(offset);
+
+ regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
+ mask, mask);
+}
+
+static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
+ u32 mask = get_mask(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &val);
+ /*
+ * On stmfx, gpio pins direction is (0)input, (1)output.
+ * .get_direction returns 0=out, 1=in
+ */
+
+ return ret ? ret : !(val & mask);
+}
+
+static int stmfx_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, 0);
+}
+
+static int stmfx_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ stmfx_gpio_set(gc, offset, value);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, mask);
+}
+
+static int stmfx_pinconf_get_pupd(struct stmfx_pinctrl *pctl,
+ unsigned int offset)
+{
+ u32 reg = STMFX_REG_GPIO_PUPD + get_reg(offset);
+ u32 pupd, mask = get_mask(offset);
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &pupd);
+ if (ret)
+ return ret;
+
+ return !!(pupd & mask);
+}
+
+static int stmfx_pinconf_set_pupd(struct stmfx_pinctrl *pctl,
+ unsigned int offset, u32 pupd)
+{
+ u32 reg = STMFX_REG_GPIO_PUPD + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, pupd ? mask : 0);
+}
+
+static int stmfx_pinconf_get_type(struct stmfx_pinctrl *pctl,
+ unsigned int offset)
+{
+ u32 reg = STMFX_REG_GPIO_TYPE + get_reg(offset);
+ u32 type, mask = get_mask(offset);
+ int ret;
+
+ ret = regmap_read(pctl->stmfx->map, reg, &type);
+ if (ret)
+ return ret;
+
+ return !!(type & mask);
+}
+
+static int stmfx_pinconf_set_type(struct stmfx_pinctrl *pctl,
+ unsigned int offset, u32 type)
+{
+ u32 reg = STMFX_REG_GPIO_TYPE + get_reg(offset);
+ u32 mask = get_mask(offset);
+
+ return regmap_write_bits(pctl->stmfx->map, reg, mask, type ? mask : 0);
+}
+
+static int stmfx_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct stmfx_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ u32 param = pinconf_to_config_param(*config);
+ struct pinctrl_gpio_range *range;
+ u32 arg = 0;
+ int ret, dir, type, pupd;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range)
+ return -EINVAL;
+
+ dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
+ if (dir < 0)
+ return dir;
+ type = stmfx_pinconf_get_type(pctl, pin);
+ if (type < 0)
+ return type;
+ pupd = stmfx_pinconf_get_pupd(pctl, pin);
+ if (pupd < 0)
+ return pupd;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if ((!dir && (!type || !pupd)) || (dir && !type))
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (dir && type && !pupd)
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (type && pupd)
+ arg = 1;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if ((!dir && type) || (dir && !type))
+ arg = 1;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if ((!dir && !type) || (dir && type))
+ arg = 1;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ if (dir)
+ return -EINVAL;
+
+ ret = stmfx_gpio_get(&pctl->gpio_chip, pin);
+ if (ret < 0)
+ return ret;
+
+ arg = ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct stmfx_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_gpio_range *range;
+ enum pin_config_param param;
+ u32 arg;
+ int dir, i, ret;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range) {
+ dev_err(pctldev->dev, "pin %d is not available\n", pin);
+ return -EINVAL;
+ }
+
+ dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
+ if (dir < 0)
+ return dir;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = stmfx_pinconf_set_pupd(pctl, pin, 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!dir)
+ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ else
+ ret = stmfx_pinconf_set_type(pctl, pin, 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (!dir)
+ ret = stmfx_pinconf_set_type(pctl, pin, 0);
+ else
+ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ ret = stmfx_gpio_direction_output(&pctl->gpio_chip,
+ pin, arg);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ struct stmfx_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_gpio_range *range;
+ int dir, type, pupd, val;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, offset);
+ if (!range)
+ return;
+
+ dir = stmfx_gpio_get_direction(&pctl->gpio_chip, offset);
+ if (dir < 0)
+ return;
+ type = stmfx_pinconf_get_type(pctl, offset);
+ if (type < 0)
+ return;
+ pupd = stmfx_pinconf_get_pupd(pctl, offset);
+ if (pupd < 0)
+ return;
+ val = stmfx_gpio_get(&pctl->gpio_chip, offset);
+ if (val < 0)
+ return;
+
+ if (!dir) {
+ seq_printf(s, "output %s ", val ? "high" : "low");
+ if (type)
+ seq_printf(s, "open drain %s internal pull-up ",
+ pupd ? "with" : "without");
+ else
+ seq_puts(s, "push pull no pull ");
+ } else {
+ seq_printf(s, "input %s ", val ? "high" : "low");
+ if (type)
+ seq_printf(s, "with internal pull-%s ",
+ pupd ? "up" : "down");
+ else
+ seq_printf(s, "%s ", pupd ? "floating" : "analog");
+ }
+}
+
+static const struct pinconf_ops stmfx_pinconf_ops = {
+ .pin_config_get = stmfx_pinconf_get,
+ .pin_config_set = stmfx_pinconf_set,
+ .pin_config_dbg_show = stmfx_pinconf_dbg_show,
+};
+
+static int stmfx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *stmfx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return NULL;
+}
+
+static int stmfx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ return -ENOTSUPP;
+}
+
+static const struct pinctrl_ops stmfx_pinctrl_ops = {
+ .get_groups_count = stmfx_pinctrl_get_groups_count,
+ .get_group_name = stmfx_pinctrl_get_group_name,
+ .get_group_pins = stmfx_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static void stmfx_pinctrl_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ pctl->irq_gpi_src[reg] &= ~mask;
+}
+
+static void stmfx_pinctrl_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ pctl->irq_gpi_src[reg] |= mask;
+}
+
+static int stmfx_pinctrl_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ if (type == IRQ_TYPE_NONE)
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_BOTH) {
+ pctl->irq_gpi_evt[reg] |= mask;
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ pctl->irq_gpi_evt[reg] &= ~mask;
+ irq_set_handler_locked(data, handle_level_irq);
+ }
+
+ if ((type & IRQ_TYPE_EDGE_RISING) || (type & IRQ_TYPE_LEVEL_HIGH))
+ pctl->irq_gpi_type[reg] |= mask;
+ else
+ pctl->irq_gpi_type[reg] &= ~mask;
+
+ /*
+ * In case of (type & IRQ_TYPE_EDGE_BOTH), we need to know current
+ * GPIO value to set the right edge trigger. But in atomic context
+ * here we can't access registers over I2C. That's why (type &
+ * IRQ_TYPE_EDGE_BOTH) will be managed in .irq_sync_unlock.
+ */
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ pctl->irq_toggle_edge[reg] |= mask;
+ else
+ pctl->irq_toggle_edge[reg] &= ~mask;
+
+ return 0;
+}
+
+static void stmfx_pinctrl_irq_bus_lock(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+
+ mutex_lock(&pctl->lock);
+}
+
+static void stmfx_pinctrl_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gpio_chip);
+ u32 reg = get_reg(data->hwirq);
+ u32 mask = get_mask(data->hwirq);
+
+ /*
+ * In case of IRQ_TYPE_EDGE_BOTH), read the current GPIO value
+ * (this couldn't be done in .irq_set_type because of atomic context)
+ * to set the right irq trigger type.
+ */
+ if (pctl->irq_toggle_edge[reg] & mask) {
+ if (stmfx_gpio_get(gpio_chip, data->hwirq))
+ pctl->irq_gpi_type[reg] &= ~mask;
+ else
+ pctl->irq_gpi_type[reg] |= mask;
+ }
+
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT,
+ pctl->irq_gpi_evt, NR_GPIO_REGS);
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE,
+ pctl->irq_gpi_type, NR_GPIO_REGS);
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ pctl->irq_gpi_src, NR_GPIO_REGS);
+
+ mutex_unlock(&pctl->lock);
+}
+
+static void stmfx_pinctrl_irq_toggle_trigger(struct stmfx_pinctrl *pctl,
+ unsigned int offset)
+{
+ u32 reg = get_reg(offset);
+ u32 mask = get_mask(offset);
+ int val;
+
+ if (!(pctl->irq_toggle_edge[reg] & mask))
+ return;
+
+ val = stmfx_gpio_get(&pctl->gpio_chip, offset);
+ if (val < 0)
+ return;
+
+ if (val) {
+ pctl->irq_gpi_type[reg] &= ~mask;
+ regmap_write_bits(pctl->stmfx->map,
+ STMFX_REG_IRQ_GPI_TYPE + reg,
+ mask, 0);
+
+ } else {
+ pctl->irq_gpi_type[reg] |= mask;
+ regmap_write_bits(pctl->stmfx->map,
+ STMFX_REG_IRQ_GPI_TYPE + reg,
+ mask, mask);
+ }
+}
+
+static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
+{
+ struct stmfx_pinctrl *pctl = (struct stmfx_pinctrl *)dev_id;
+ struct gpio_chip *gc = &pctl->gpio_chip;
+ u8 pending[NR_GPIO_REGS];
+ u8 src[NR_GPIO_REGS] = {0, 0, 0};
+ unsigned long n, status;
+ int ret;
+
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
+ &pending, NR_GPIO_REGS);
+ if (ret)
+ return IRQ_NONE;
+
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ src, NR_GPIO_REGS);
+
+ status = *(unsigned long *)pending;
+ for_each_set_bit(n, &status, gc->ngpio) {
+ handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
+ stmfx_pinctrl_irq_toggle_trigger(pctl, n);
+ }
+
+ regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ pctl->irq_gpi_src, NR_GPIO_REGS);
+
+ return IRQ_HANDLED;
+}
+
+static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
+{
+ struct pinctrl_gpio_range *gpio_range;
+ struct pinctrl_dev *pctl_dev = pctl->pctl_dev;
+ u32 func = STMFX_FUNC_GPIO;
+
+ pctl->gpio_valid_mask = GENMASK(15, 0);
+
+ gpio_range = pinctrl_find_gpio_range_from_pin(pctl_dev, 16);
+ if (gpio_range) {
+ func |= STMFX_FUNC_ALTGPIO_LOW;
+ pctl->gpio_valid_mask |= GENMASK(19, 16);
+ }
+
+ gpio_range = pinctrl_find_gpio_range_from_pin(pctl_dev, 20);
+ if (gpio_range) {
+ func |= STMFX_FUNC_ALTGPIO_HIGH;
+ pctl->gpio_valid_mask |= GENMASK(23, 20);
+ }
+
+ return stmfx_function_enable(pctl->stmfx, func);
+}
+
+static int stmfx_pinctrl_probe(struct platform_device *pdev)
+{
+ struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *np = pdev->dev.of_node;
+ struct stmfx_pinctrl *pctl;
+ u32 n;
+ int irq, ret;
+
+ pctl = devm_kzalloc(stmfx->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pctl);
+
+ pctl->dev = &pdev->dev;
+ pctl->stmfx = stmfx;
+
+ if (!of_find_property(np, "gpio-ranges", NULL)) {
+ dev_err(pctl->dev, "missing required gpio-ranges property\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(pctl->dev, "failed to get irq\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&pctl->lock);
+
+ /* Register pin controller */
+ pctl->pctl_desc.name = "stmfx-pinctrl";
+ pctl->pctl_desc.pctlops = &stmfx_pinctrl_ops;
+ pctl->pctl_desc.confops = &stmfx_pinconf_ops;
+ pctl->pctl_desc.pins = stmfx_pins;
+ pctl->pctl_desc.npins = ARRAY_SIZE(stmfx_pins);
+ pctl->pctl_desc.owner = THIS_MODULE;
+
+ ret = devm_pinctrl_register_and_init(pctl->dev, &pctl->pctl_desc,
+ pctl, &pctl->pctl_dev);
+ if (ret) {
+ dev_err(pctl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctl->pctl_dev);
+ if (ret) {
+ dev_err(pctl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ /* Register gpio controller */
+ pctl->gpio_chip.label = "stmfx-gpio";
+ pctl->gpio_chip.parent = pctl->dev;
+ pctl->gpio_chip.get_direction = stmfx_gpio_get_direction;
+ pctl->gpio_chip.direction_input = stmfx_gpio_direction_input;
+ pctl->gpio_chip.direction_output = stmfx_gpio_direction_output;
+ pctl->gpio_chip.get = stmfx_gpio_get;
+ pctl->gpio_chip.set = stmfx_gpio_set;
+ pctl->gpio_chip.set_config = gpiochip_generic_config;
+ pctl->gpio_chip.base = -1;
+ pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
+ pctl->gpio_chip.can_sleep = true;
+ pctl->gpio_chip.of_node = np;
+ pctl->gpio_chip.need_valid_mask = true;
+
+ ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
+ if (ret) {
+ dev_err(pctl->dev, "gpio_chip registration failed\n");
+ return ret;
+ }
+
+ ret = stmfx_pinctrl_gpio_function_enable(pctl);
+ if (ret)
+ return ret;
+
+ pctl->irq_chip.name = dev_name(pctl->dev);
+ pctl->irq_chip.irq_mask = stmfx_pinctrl_irq_mask;
+ pctl->irq_chip.irq_unmask = stmfx_pinctrl_irq_unmask;
+ pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
+ pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
+ pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
+ for_each_clear_bit(n, &pctl->gpio_valid_mask, pctl->gpio_chip.ngpio)
+ clear_bit(n, pctl->gpio_chip.valid_mask);
+
+ ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
+ 0, handle_bad_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(pctl->dev, "cannot add irqchip to gpiochip\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(pctl->dev, irq, NULL,
+ stmfx_pinctrl_irq_thread_fn,
+ IRQF_ONESHOT,
+ pctl->irq_chip.name, pctl);
+ if (ret) {
+ dev_err(pctl->dev, "cannot request irq%d\n", irq);
+ return ret;
+ }
+
+ gpiochip_set_nested_irqchip(&pctl->gpio_chip, &pctl->irq_chip, irq);
+
+ dev_info(pctl->dev,
+ "%ld GPIOs available\n", hweight_long(pctl->gpio_valid_mask));
+
+ return 0;
+}
+
+static int stmfx_pinctrl_remove(struct platform_device *pdev)
+{
+ struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+
+ return stmfx_function_disable(stmfx,
+ STMFX_FUNC_GPIO |
+ STMFX_FUNC_ALTGPIO_LOW |
+ STMFX_FUNC_ALTGPIO_HIGH);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stmfx_pinctrl_backup_regs(struct stmfx_pinctrl *pctl)
+{
+ int ret;
+
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_STATE,
+ &pctl->bkp_gpio_state, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_DIR,
+ &pctl->bkp_gpio_dir, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_TYPE,
+ &pctl->bkp_gpio_type, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_PUPD,
+ &pctl->bkp_gpio_pupd, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int stmfx_pinctrl_restore_regs(struct stmfx_pinctrl *pctl)
+{
+ int ret;
+
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_DIR,
+ pctl->bkp_gpio_dir, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_TYPE,
+ pctl->bkp_gpio_type, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_PUPD,
+ pctl->bkp_gpio_pupd, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPO_SET,
+ pctl->bkp_gpio_state, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT,
+ pctl->irq_gpi_evt, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE,
+ pctl->irq_gpi_type, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+ ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
+ pctl->irq_gpi_src, NR_GPIO_REGS);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int stmfx_pinctrl_suspend(struct device *dev)
+{
+ struct stmfx_pinctrl *pctl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = stmfx_pinctrl_backup_regs(pctl);
+ if (ret) {
+ dev_err(pctl->dev, "registers backup failure\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stmfx_pinctrl_resume(struct device *dev)
+{
+ struct stmfx_pinctrl *pctl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = stmfx_pinctrl_restore_regs(pctl);
+ if (ret) {
+ dev_err(pctl->dev, "registers restoration failure\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stmfx_pinctrl_dev_pm_ops,
+ stmfx_pinctrl_suspend, stmfx_pinctrl_resume);
+
+static const struct of_device_id stmfx_pinctrl_of_match[] = {
+ { .compatible = "st,stmfx-0300-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stmfx_pinctrl_of_match);
+
+static struct platform_driver stmfx_pinctrl_driver = {
+ .driver = {
+ .name = "stmfx-pinctrl",
+ .of_match_table = stmfx_pinctrl_of_match,
+ .pm = &stmfx_pinctrl_dev_pm_ops,
+ },
+ .probe = stmfx_pinctrl_probe,
+ .remove = stmfx_pinctrl_remove,
+};
+module_platform_driver(stmfx_pinctrl_driver);
+
+MODULE_DESCRIPTION("STMFX pinctrl/GPIO driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
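The driver above addresses its 24 lines through three 8-bit register banks, and get_reg()/get_shift()/get_mask() split a GPIO offset into a bank index plus a bit mask. A quick userspace illustration of that mapping (not kernel code):

#include <stdio.h>

#define NR_GPIOS_PER_REG	8
#define get_reg(offset)		((offset) / NR_GPIOS_PER_REG)
#define get_shift(offset)	((offset) % NR_GPIOS_PER_REG)
#define get_mask(offset)	(1U << get_shift(offset))

int main(void)
{
	/* Pin 18 ("agpio2") lands in bank 2 (e.g. GPIO_STATE3), bit 2 */
	unsigned int offset = 18;

	printf("offset %u -> bank %u, mask 0x%02x\n",
	       offset, get_reg(offset), get_mask(offset));
	return 0;
}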
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 9186d81a51cc..997317d2f2b9 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -59,6 +59,18 @@ config CROS_EC_I2C
a checksum. Failing accesses will be retried three times to
improve reliability.
+config CROS_EC_RPMSG
+ tristate "ChromeOS Embedded Controller (rpmsg)"
+ depends on MFD_CROS_EC && RPMSG && OF
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through rpmsg. This uses a simple byte-level protocol with a
+ checksum. Also, since there is no additional EC-to-host interrupt, this
+ driver uses a byte in the message to distinguish host events from host
+ commands.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_rpmsg.
+
config CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
depends on MFD_CROS_EC && SPI
@@ -152,6 +164,18 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+config CROS_USBPD_LOGGER
+ tristate "Logging driver for USB PD charger"
+ depends on CHARGER_CROS_USBPD
+ default y
+ select RTC_LIB
+ help
+ This option enables support for logging event data for the USB PD charger
+ available in the Embedded Controller on ChromeOS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_usbpd_logger.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 1e2f0029b597..1b2f1dcfcd5c 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,18 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
+# tell define_trace.h where to find the cros ec trace header
+CFLAGS_cros_ec_trace.o:= -I$(src)
+
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o
+obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
-obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
+obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
+obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 24326eecd787..7abbb6167766 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -125,7 +125,7 @@ static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
return false;
pdev = to_pci_dev(dev);
- return devid == PCI_DEVID(pdev->bus->number, pdev->devfn);
+ return devid == pci_dev_id(pdev);
}
static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 2b8e8a01a739..4c2a27f6a6d0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -72,15 +72,9 @@ static void cros_ec_console_log_work(struct work_struct *__work)
int buf_space;
int ret;
- ret = cros_ec_cmd_xfer(ec->ec_dev, &snapshot_msg);
- if (ret < 0) {
- dev_err(ec->dev, "EC communication failed\n");
- goto resched;
- }
- if (snapshot_msg.result != EC_RES_SUCCESS) {
- dev_err(ec->dev, "EC failed to snapshot the console log\n");
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, &snapshot_msg);
+ if (ret < 0)
goto resched;
- }
/* Loop until we have read everything, or there's an error. */
mutex_lock(&debug_info->log_mutex);
@@ -95,16 +89,10 @@ static void cros_ec_console_log_work(struct work_struct *__work)
memset(read_params, '\0', sizeof(*read_params));
read_params->subcmd = CONSOLE_READ_RECENT;
- ret = cros_ec_cmd_xfer(ec->ec_dev, debug_info->read_msg);
- if (ret < 0) {
- dev_err(ec->dev, "EC communication failed\n");
- break;
- }
- if (debug_info->read_msg->result != EC_RES_SUCCESS) {
- dev_err(ec->dev,
- "EC failed to read the console log\n");
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev,
+ debug_info->read_msg);
+ if (ret < 0)
break;
- }
/* If the buffer is empty, we're done here. */
if (ret == 0 || ec_buffer[0] == '\0')
@@ -290,9 +278,8 @@ static int ec_read_version_supported(struct cros_ec_dev *ec)
params->cmd = EC_CMD_CONSOLE_READ;
response = (struct ec_response_get_cmd_versions *)msg->data;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg) >= 0 &&
- msg->result == EC_RES_SUCCESS &&
- (response->version_mask & EC_VER_MASK(1));
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg) >= 0 &&
+ response->version_mask & EC_VER_MASK(1);
kfree(msg);
@@ -306,11 +293,12 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
int read_params_size;
int read_response_size;
- if (!ec_read_version_supported(ec)) {
- dev_warn(ec->dev,
- "device does not support reading the console log\n");
+ /*
+ * If the console log feature is not supported, return silently and
+ * don't create the console_log entry.
+ */
+ if (!ec_read_version_supported(ec))
return 0;
- }
buf = devm_kzalloc(ec->dev, LOG_SIZE, GFP_KERNEL);
if (!buf)
@@ -336,12 +324,8 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
mutex_init(&debug_info->log_mutex);
init_waitqueue_head(&debug_info->log_wq);
- if (!debugfs_create_file("console_log",
- S_IFREG | 0444,
- debug_info->dir,
- debug_info,
- &cros_ec_console_log_fops))
- return -ENOMEM;
+ debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
+ debug_info, &cros_ec_console_log_fops);
INIT_DELAYED_WORK(&debug_info->log_poll_work,
cros_ec_console_log_work);
@@ -375,9 +359,8 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
msg->command = EC_CMD_GET_PANIC_INFO;
msg->insize = insize;
- ret = cros_ec_cmd_xfer(ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
if (ret < 0) {
- dev_warn(debug_info->ec->dev, "Cannot read panicinfo.\n");
ret = 0;
goto free;
}
@@ -389,13 +372,8 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
debug_info->panicinfo_blob.data = msg->data;
debug_info->panicinfo_blob.size = ret;
- if (!debugfs_create_blob("panicinfo",
- S_IFREG | 0444,
- debug_info->dir,
- &debug_info->panicinfo_blob)) {
- ret = -ENOMEM;
- goto free;
- }
+ debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir,
+ &debug_info->panicinfo_blob);
return 0;
@@ -404,15 +382,6 @@ free:
return ret;
}
-static int cros_ec_create_pdinfo(struct cros_ec_debugfs *debug_info)
-{
- if (!debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
- &cros_ec_pdinfo_fops))
- return -ENOMEM;
-
- return 0;
-}
-
static int cros_ec_debugfs_probe(struct platform_device *pd)
{
struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
@@ -427,8 +396,6 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debug_info->ec = ec;
debug_info->dir = debugfs_create_dir(name, NULL);
- if (!debug_info->dir)
- return -ENOMEM;
ret = cros_ec_create_panicinfo(debug_info);
if (ret)
@@ -438,9 +405,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
if (ret)
goto remove_debugfs;
- ret = cros_ec_create_pdinfo(debug_info);
- if (ret)
- goto remove_log;
+ debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
+ &cros_ec_pdinfo_fops);
ec->debug_info = debug_info;
@@ -448,8 +414,6 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
return 0;
-remove_log:
- cros_ec_cleanup_console_log(debug_info);
remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 97a068dff192..3d2325197a68 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
+#include "cros_ec_trace.h"
+
#define EC_COMMAND_RETRIES 50
static int prepare_packet(struct cros_ec_device *ec_dev,
@@ -51,11 +53,24 @@ static int send_command(struct cros_ec_device *ec_dev,
int ret;
int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
+ trace_cros_ec_cmd(msg);
+
if (ec_dev->proto_version > 2)
xfer_fxn = ec_dev->pkt_xfer;
else
xfer_fxn = ec_dev->cmd_xfer;
+ if (!xfer_fxn) {
+ /*
+ * This can happen if there was a communication error and the EC
+ * is trying to use protocol v2 on an underlying communication
+ * mechanism that does not support v2.
+ */
+ dev_err_once(ec_dev->dev,
+ "missing EC transfer API, cannot send command\n");
+ return -EIO;
+ }
+
ret = (*xfer_fxn)(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
@@ -414,6 +429,12 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
else
ec_dev->mkbp_event_supported = 1;
+ /* Probe if host sleep v1 is supported for S0ix failure detection. */
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_HOST_SLEEP_EVENT,
+ &ver_mask);
+ ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
+
/*
* Get host event wake mask, assume all events are wake events
* if unavailable.
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
new file mode 100644
index 000000000000..5d3fb2abad1d
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2018 Google LLC.
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+
+#define EC_MSG_TIMEOUT_MS 200
+#define HOST_COMMAND_MARK 1
+#define HOST_EVENT_MARK 2
+
+/**
+ * struct cros_ec_rpmsg_response - rpmsg message format from the EC.
+ *
+ * @type: The type of message, either HOST_COMMAND_MARK or HOST_EVENT_MARK,
+ * indicating whether the message is a response to a host command or a
+ * host event.
+ * @data: ec_host_response for a host command.
+ */
+struct cros_ec_rpmsg_response {
+ u8 type;
+ u8 data[] __aligned(4);
+};
+
+/**
+ * struct cros_ec_rpmsg - information about an EC over rpmsg.
+ *
+ * @rpdev: rpmsg device we are connected to
+ * @xfer_ack: completion for host command transfer.
+ * @host_event_work: Work struct for pending host event.
+ */
+struct cros_ec_rpmsg {
+ struct rpmsg_device *rpdev;
+ struct completion xfer_ack;
+ struct work_struct host_event_work;
+};
+
+/**
+ * cros_ec_cmd_xfer_rpmsg - Transfer a message over rpmsg and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ *
+ * This is only used for the old EC protocol version, and is not supported
+ * by this driver.
+ *
+ * Return: -EINVAL
+ */
+static int cros_ec_cmd_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return -EINVAL;
+}
+
+/**
+ * cros_ec_pkt_xfer_rpmsg - Transfer a packet over rpmsg and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ *
+ * Return: number of bytes of the reply on success or negative error code.
+ */
+static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+ struct rpmsg_device *rpdev = ec_rpmsg->rpdev;
+ struct ec_host_response *response;
+ unsigned long timeout;
+ int len;
+ int ret;
+ u8 sum;
+ int i;
+
+ ec_msg->result = 0;
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ reinit_completion(&ec_rpmsg->xfer_ack);
+ ret = rpmsg_send(rpdev->ept, ec_dev->dout, len);
+ if (ret) {
+ dev_err(ec_dev->dev, "rpmsg send failed\n");
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(EC_MSG_TIMEOUT_MS);
+ ret = wait_for_completion_timeout(&ec_rpmsg->xfer_ack, timeout);
+ if (!ret) {
+ dev_err(ec_dev->dev, "rpmsg send timeout\n");
+ return -EIO;
+ }
+
+ /* check response error code */
+ response = (struct ec_host_response *)ec_dev->din;
+ ec_msg->result = response->result;
+
+ ret = cros_ec_check_result(ec_dev, ec_msg);
+ if (ret)
+ goto exit;
+
+ if (response->data_len > ec_msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ response->data_len, ec_msg->insize);
+ ret = -EMSGSIZE;
+ goto exit;
+ }
+
+ /* copy response packet payload and compute checksum */
+ memcpy(ec_msg->data, ec_dev->din + sizeof(*response),
+ response->data_len);
+
+ sum = 0;
+ for (i = 0; i < sizeof(*response) + response->data_len; i++)
+ sum += ec_dev->din[i];
+
+ if (sum) {
+ dev_err(ec_dev->dev, "bad packet checksum, calculated %x\n",
+ sum);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ ret = response->data_len;
+exit:
+ if (ec_msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
+
+static void
+cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
+{
+ struct cros_ec_rpmsg *ec_rpmsg = container_of(host_event_work,
+ struct cros_ec_rpmsg,
+ host_event_work);
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&ec_rpmsg->rpdev->dev);
+ bool wake_event = true;
+ int ret;
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event);
+
+ /*
+ * Signal only for wake host events, or for any interrupt if
+ * cros_ec_get_next_event() returned an error (the default value of
+ * wake_event is true).
+ */
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+
+ if (ret > 0)
+ blocking_notifier_call_chain(&ec_dev->event_notifier,
+ 0, ec_dev);
+}
+
+static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+ struct cros_ec_rpmsg_response *resp;
+
+ if (!len) {
+ dev_warn(ec_dev->dev, "rpmsg received empty response");
+ return -EINVAL;
+ }
+
+ resp = data;
+ len -= offsetof(struct cros_ec_rpmsg_response, data);
+ if (resp->type == HOST_COMMAND_MARK) {
+ if (len > ec_dev->din_size) {
+ dev_warn(ec_dev->dev,
+ "received length %d > din_size %d, truncating",
+ len, ec_dev->din_size);
+ len = ec_dev->din_size;
+ }
+
+ memcpy(ec_dev->din, resp->data, len);
+ complete(&ec_rpmsg->xfer_ack);
+ } else if (resp->type == HOST_EVENT_MARK) {
+ schedule_work(&ec_rpmsg->host_event_work);
+ } else {
+ dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
+ resp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct cros_ec_rpmsg *ec_rpmsg;
+ struct cros_ec_device *ec_dev;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ ec_rpmsg = devm_kzalloc(dev, sizeof(*ec_rpmsg), GFP_KERNEL);
+ if (!ec_rpmsg)
+ return -ENOMEM;
+
+ ec_dev->dev = dev;
+ ec_dev->priv = ec_rpmsg;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_rpmsg;
+ ec_dev->pkt_xfer = cros_ec_pkt_xfer_rpmsg;
+ ec_dev->phys_name = dev_name(&rpdev->dev);
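+ /*
+ * Initial buffer sizes, just large enough for the protocol-info
+ * handshake; the cros_ec core is expected to resize them once the
+ * EC's real limits are known.
+ */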
+ ec_dev->din_size = sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request);
+ dev_set_drvdata(dev, ec_dev);
+
+ ec_rpmsg->rpdev = rpdev;
+ init_completion(&ec_rpmsg->xfer_ack);
+ INIT_WORK(&ec_rpmsg->host_event_work,
+ cros_ec_rpmsg_host_event_function);
+
+ return cros_ec_register(ec_dev);
+}
+
+static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+
+ cancel_work_sync(&ec_rpmsg->host_event_work);
+}
+
+static const struct of_device_id cros_ec_rpmsg_of_match[] = {
+ { .compatible = "google,cros-ec-rpmsg", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cros_ec_rpmsg_of_match);
+
+static struct rpmsg_driver cros_ec_driver_rpmsg = {
+ .drv = {
+ .name = "cros-ec-rpmsg",
+ .of_match_table = cros_ec_rpmsg_of_match,
+ },
+ .probe = cros_ec_rpmsg_probe,
+ .remove = cros_ec_rpmsg_remove,
+ .callback = cros_ec_rpmsg_callback,
+};
+
+module_rpmsg_driver(cros_ec_driver_rpmsg);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS EC multi function device (rpmsg)");
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index ffc38f9d4829..8e9451720e73 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -75,6 +75,27 @@ struct cros_ec_spi {
unsigned int end_of_msg_delay;
};
+typedef int (*cros_ec_xfer_fn_t) (struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg);
+
+/**
+ * struct cros_ec_xfer_work_params - params for our high priority workers
+ *
+ * @work: The work_struct needed to queue work
+ * @fn: The function to use to transfer
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ * @ret: The return value of the function
+ */
+
+struct cros_ec_xfer_work_params {
+ struct work_struct work;
+ cros_ec_xfer_fn_t fn;
+ struct cros_ec_device *ec_dev;
+ struct cros_ec_command *ec_msg;
+ int ret;
+};
+
static void debug_packet(struct device *dev, const char *name, u8 *ptr,
int len)
{
@@ -350,13 +371,13 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
}
/**
- * cros_ec_pkt_xfer_spi - Transfer a packet over SPI and receive the reply
+ * do_cros_ec_pkt_xfer_spi - Transfer a packet over SPI and receive the reply
*
* @ec_dev: ChromeOS EC device
* @ec_msg: Message to transfer
*/
-static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
- struct cros_ec_command *ec_msg)
+static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
{
struct ec_host_response *response;
struct cros_ec_spi *ec_spi = ec_dev->priv;
@@ -493,13 +514,13 @@ exit:
}
/**
- * cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
+ * do_cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
*
* @ec_dev: ChromeOS EC device
* @ec_msg: Message to transfer
*/
-static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
- struct cros_ec_command *ec_msg)
+static int do_cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
{
struct cros_ec_spi *ec_spi = ec_dev->priv;
struct spi_transfer trans;
@@ -611,6 +632,53 @@ exit:
return ret;
}
+static void cros_ec_xfer_high_pri_work(struct work_struct *work)
+{
+ struct cros_ec_xfer_work_params *params;
+
+ params = container_of(work, struct cros_ec_xfer_work_params, work);
+ params->ret = params->fn(params->ec_dev, params->ec_msg);
+}
+
+static int cros_ec_xfer_high_pri(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg,
+ cros_ec_xfer_fn_t fn)
+{
+ struct cros_ec_xfer_work_params params;
+
+ INIT_WORK_ONSTACK(&params.work, cros_ec_xfer_high_pri_work);
+ params.ec_dev = ec_dev;
+ params.ec_msg = ec_msg;
+ params.fn = fn;
+
+ /*
+ * This looks a bit ridiculous. Why do the work on a
+ * different thread if we're just going to block waiting for
+ * the thread to finish? The key here is that the thread is
+ * running at high priority but the calling context might not
+ * be. We need to be at high priority to avoid getting
+ * context switched out for too long and the EC giving up on
+ * the transfer.
+ */
+ queue_work(system_highpri_wq, &params.work);
+ flush_work(&params.work);
+ destroy_work_on_stack(&params.work);
+
+ return params.ret;
+}
+
+static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_pkt_xfer_spi);
+}
+
+static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_cmd_xfer_spi);
+}
+
static void cros_ec_spi_dt_probe(struct cros_ec_spi *ec_spi, struct device *dev)
{
struct device_node *np = dev->of_node;
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
new file mode 100644
index 000000000000..0a76412095a9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+// Trace events for the ChromeOS Embedded Controller
+//
+// Copyright 2019 Google LLC.
+
+#define TRACE_SYMBOL(a) {a, #a}
+
+// Generate the list using the following script:
+// sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/mfd/cros_ec_commands.h
+#define EC_CMDS \
+ TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
+ TRACE_SYMBOL(EC_CMD_HELLO), \
+ TRACE_SYMBOL(EC_CMD_GET_VERSION), \
+ TRACE_SYMBOL(EC_CMD_READ_TEST), \
+ TRACE_SYMBOL(EC_CMD_GET_BUILD_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_CHIP_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_BOARD_VERSION), \
+ TRACE_SYMBOL(EC_CMD_READ_MEMMAP), \
+ TRACE_SYMBOL(EC_CMD_GET_CMD_VERSIONS), \
+ TRACE_SYMBOL(EC_CMD_GET_COMMS_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TEST_PROTOCOL), \
+ TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
+ TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
+ TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
+ TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
+ TRACE_SYMBOL(EC_CMD_FLASH_READ), \
+ TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
+ TRACE_SYMBOL(EC_CMD_FLASH_ERASE), \
+ TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
+ TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
+ TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_DUTY), \
+ TRACE_SYMBOL(EC_CMD_PWM_SET_DUTY), \
+ TRACE_SYMBOL(EC_CMD_PWM_GET_DUTY), \
+ TRACE_SYMBOL(EC_CMD_LIGHTBAR_CMD), \
+ TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
+ TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
+ TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
+ TRACE_SYMBOL(EC_CMD_PSTORE_WRITE), \
+ TRACE_SYMBOL(EC_CMD_RTC_GET_VALUE), \
+ TRACE_SYMBOL(EC_CMD_RTC_GET_ALARM), \
+ TRACE_SYMBOL(EC_CMD_RTC_SET_VALUE), \
+ TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
+ TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
+ TRACE_SYMBOL(EC_CMD_PORT80_READ), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
+ TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
+ TRACE_SYMBOL(EC_CMD_TMP006_GET_CALIBRATION), \
+ TRACE_SYMBOL(EC_CMD_TMP006_SET_CALIBRATION), \
+ TRACE_SYMBOL(EC_CMD_TMP006_GET_RAW), \
+ TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
+ TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
+ TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
+ TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
+ TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
+ TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
+ TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
+ TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SCI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_WAKE_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SMI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SCI_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
+ TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
+ TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
+ TRACE_SYMBOL(EC_CMD_GPIO_SET), \
+ TRACE_SYMBOL(EC_CMD_GPIO_GET), \
+ TRACE_SYMBOL(EC_CMD_I2C_READ), \
+ TRACE_SYMBOL(EC_CMD_I2C_WRITE), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_CONSOLE_SNAPSHOT), \
+ TRACE_SYMBOL(EC_CMD_CONSOLE_READ), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_CUT_OFF), \
+ TRACE_SYMBOL(EC_CMD_USB_MUX), \
+ TRACE_SYMBOL(EC_CMD_LDO_SET), \
+ TRACE_SYMBOL(EC_CMD_LDO_GET), \
+ TRACE_SYMBOL(EC_CMD_POWER_INFO), \
+ TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU), \
+ TRACE_SYMBOL(EC_CMD_HANG_DETECT), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
+ TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
+ TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
+ TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
+ TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
+ TRACE_SYMBOL(EC_CMD_CODEC_I2S), \
+ TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
+ TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
+ TRACE_SYMBOL(EC_CMD_ACPI_READ), \
+ TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
+ TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+ TRACE_SYMBOL(EC_CMD_CEC_SET), \
+ TRACE_SYMBOL(EC_CMD_CEC_GET), \
+ TRACE_SYMBOL(EC_CMD_REBOOT), \
+ TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
+ TRACE_SYMBOL(EC_CMD_VERSION0), \
+ TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
+ TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
+ TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
+ TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO)
+
+#define CREATE_TRACE_POINTS
+#include "cros_ec_trace.h"
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
new file mode 100644
index 000000000000..7ae3b89c78b9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Embedded Controller
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/mfd/cros_ec.h>
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cros_ec_cmd_class,
+ TP_PROTO(struct cros_ec_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(uint32_t, version)
+ __field(uint32_t, command)
+ ),
+ TP_fast_assign(
+ __entry->version = cmd->version;
+ __entry->command = cmd->command;
+ ),
+ TP_printk("version: %u, command: %s", __entry->version,
+ __print_symbolic(__entry->command, EC_CMDS))
+);
+
+
+DEFINE_EVENT(cros_ec_cmd_class, cros_ec_cmd,
+ TP_PROTO(struct cros_ec_command *cmd),
+ TP_ARGS(cmd)
+);
+
+
+#endif /* _CROS_EC_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
new file mode 100644
index 000000000000..7c7b267626a0
--- /dev/null
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Logging driver for ChromeOS EC based USBPD Charger.
+ *
+ * Copyright 2018 Google LLC.
+ */
+
+#include <linux/ktime.h>
+#include <linux/math64.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define DRV_NAME "cros-usbpd-logger"
+
+#define CROS_USBPD_MAX_LOG_ENTRIES 30
+#define CROS_USBPD_LOG_UPDATE_DELAY msecs_to_jiffies(60000)
+#define CROS_USBPD_DATA_SIZE 16
+#define CROS_USBPD_LOG_RESP_SIZE (sizeof(struct ec_response_pd_log) + \
+ CROS_USBPD_DATA_SIZE)
+#define CROS_USBPD_BUFFER_SIZE (sizeof(struct cros_ec_command) + \
+ CROS_USBPD_LOG_RESP_SIZE)
+/* Buffer for building the PDLOG string */
+#define BUF_SIZE 80
+
+struct logger_data {
+ struct device *dev;
+ struct cros_ec_dev *ec_dev;
+ u8 ec_buffer[CROS_USBPD_BUFFER_SIZE];
+ struct delayed_work log_work;
+ struct workqueue_struct *log_workqueue;
+};
+
+static const char * const chg_type_names[] = {
+ "None", "PD", "Type-C", "Proprietary", "DCP", "CDP", "SDP",
+ "Other", "VBUS"
+};
+
+static const char * const role_names[] = {
+ "Disconnected", "SRC", "SNK", "SNK (not charging)"
+};
+
+static const char * const fault_names[] = {
+ "---", "OCP", "fast OCP", "OVP", "Discharge"
+};
+
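+/* Append a formatted string at offset @pos of @buf (bounded by BUF_SIZE). */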
+static int append_str(char *buf, int pos, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf + pos, BUF_SIZE - pos, fmt, args);
+ va_end(args);
+
+ return i;
+}
+
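+/* Fetch one PD log entry from the EC, or an ERR_PTR on failure. */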
+static struct ec_response_pd_log *ec_get_log_entry(struct logger_data *logger)
+{
+ struct cros_ec_dev *ec_dev = logger->ec_dev;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = (struct cros_ec_command *)logger->ec_buffer;
+
+ msg->command = ec_dev->cmd_offset + EC_CMD_PD_GET_LOG_ENTRY;
+ msg->insize = CROS_USBPD_LOG_RESP_SIZE;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev->ec_dev, msg);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return (struct ec_response_pd_log *)msg->data;
+}
+
+static void cros_usbpd_print_log_entry(struct ec_response_pd_log *r,
+ ktime_t tstamp)
+{
+ const char *fault, *role, *chg_type;
+ struct usb_chg_measures *meas;
+ struct mcdp_info *minfo;
+ int role_idx, type_idx;
+ char buf[BUF_SIZE + 1];
+ struct rtc_time rt;
+ int len = 0;
+ s32 rem;
+ int i;
+
+ /* The timestamp is a number of 1024ths of a second in the past */
+ tstamp = ktime_sub_us(tstamp, r->timestamp << PD_LOG_TIMESTAMP_SHIFT);
+ rt = rtc_ktime_to_tm(tstamp);
+
+ switch (r->type) {
+ case PD_EVENT_MCU_CHARGE:
+ if (r->data & CHARGE_FLAGS_OVERRIDE)
+ len += append_str(buf, len, "override ");
+
+ if (r->data & CHARGE_FLAGS_DELAYED_OVERRIDE)
+ len += append_str(buf, len, "pending_override ");
+
+ role_idx = r->data & CHARGE_FLAGS_ROLE_MASK;
+ role = role_idx < ARRAY_SIZE(role_names) ?
+ role_names[role_idx] : "Unknown";
+
+ type_idx = (r->data & CHARGE_FLAGS_TYPE_MASK)
+ >> CHARGE_FLAGS_TYPE_SHIFT;
+
+ chg_type = type_idx < ARRAY_SIZE(chg_type_names) ?
+ chg_type_names[type_idx] : "???";
+
+ if (role_idx == USB_PD_PORT_POWER_DISCONNECTED ||
+ role_idx == USB_PD_PORT_POWER_SOURCE) {
+ len += append_str(buf, len, "%s", role);
+ break;
+ }
+
+ meas = (struct usb_chg_measures *)r->payload;
+ len += append_str(buf, len, "%s %s %s %dmV max %dmV / %dmA",
+ role, r->data & CHARGE_FLAGS_DUAL_ROLE ?
+ "DRP" : "Charger",
+ chg_type, meas->voltage_now,
+ meas->voltage_max, meas->current_max);
+ break;
+ case PD_EVENT_ACC_RW_FAIL:
+ len += append_str(buf, len, "RW signature check failed");
+ break;
+ case PD_EVENT_PS_FAULT:
+ fault = r->data < ARRAY_SIZE(fault_names) ? fault_names[r->data]
+ : "???";
+ len += append_str(buf, len, "Power supply fault: %s", fault);
+ break;
+ case PD_EVENT_VIDEO_DP_MODE:
+ len += append_str(buf, len, "DP mode %sabled", r->data == 1 ?
+ "en" : "dis");
+ break;
+ case PD_EVENT_VIDEO_CODEC:
+ minfo = (struct mcdp_info *)r->payload;
+ len += append_str(buf, len, "HDMI info: family:%04x chipid:%04x ",
+ MCDP_FAMILY(minfo->family),
+ MCDP_CHIPID(minfo->chipid));
+ len += append_str(buf, len, "irom:%d.%d.%d fw:%d.%d.%d",
+ minfo->irom.major, minfo->irom.minor,
+ minfo->irom.build, minfo->fw.major,
+ minfo->fw.minor, minfo->fw.build);
+ break;
+ default:
+ len += append_str(buf, len, "Event %02x (%04x) [", r->type,
+ r->data);
+
+ for (i = 0; i < PD_LOG_SIZE(r->size_port); i++)
+ len += append_str(buf, len, "%02x ", r->payload[i]);
+
+ len += append_str(buf, len, "]");
+ break;
+ }
+
+ div_s64_rem(ktime_to_ms(tstamp), MSEC_PER_SEC, &rem);
+ pr_info("PDLOG %d/%02d/%02d %02d:%02d:%02d.%03d P%d %s\n",
+ rt.tm_year + 1900, rt.tm_mon + 1, rt.tm_mday,
+ rt.tm_hour, rt.tm_min, rt.tm_sec, rem,
+ PD_LOG_PORT(r->size_port), buf);
+}
+
+static void cros_usbpd_log_check(struct work_struct *work)
+{
+ struct logger_data *logger = container_of(to_delayed_work(work),
+ struct logger_data,
+ log_work);
+ struct device *dev = logger->dev;
+ struct ec_response_pd_log *r;
+ int entries = 0;
+ ktime_t now;
+
+ while (entries++ < CROS_USBPD_MAX_LOG_ENTRIES) {
+ r = ec_get_log_entry(logger);
+ now = ktime_get_real();
+ if (IS_ERR(r)) {
+ dev_dbg(dev, "Cannot get PD log %ld\n", PTR_ERR(r));
+ break;
+ }
+ if (r->type == PD_EVENT_NO_ENTRY)
+ break;
+
+ cros_usbpd_print_log_entry(r, now);
+ }
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+}
+
+static int cros_usbpd_logger_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct device *dev = &pd->dev;
+ struct logger_data *logger;
+
+ logger = devm_kzalloc(dev, sizeof(*logger), GFP_KERNEL);
+ if (!logger)
+ return -ENOMEM;
+
+ logger->dev = dev;
+ logger->ec_dev = ec_dev;
+
+ platform_set_drvdata(pd, logger);
+
+ /* Retrieve PD event logs periodically */
+ INIT_DELAYED_WORK(&logger->log_work, cros_usbpd_log_check);
+ logger->log_workqueue = create_singlethread_workqueue("cros_usbpd_log");
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+
+ return 0;
+}
+
+static int cros_usbpd_logger_remove(struct platform_device *pd)
+{
+ struct logger_data *logger = platform_get_drvdata(pd);
+
+ cancel_delayed_work_sync(&logger->log_work);
+
+ return 0;
+}
+
+static int __maybe_unused cros_usbpd_logger_resume(struct device *dev)
+{
+ struct logger_data *logger = dev_get_drvdata(dev);
+
+ queue_delayed_work(logger->log_workqueue, &logger->log_work,
+ CROS_USBPD_LOG_UPDATE_DELAY);
+
+ return 0;
+}
+
+static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
+{
+ struct logger_data *logger = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&logger->log_work);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
+ cros_usbpd_logger_resume);
+
+static struct platform_driver cros_usbpd_logger_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_usbpd_logger_pm_ops,
+ },
+ .probe = cros_usbpd_logger_probe,
+ .remove = cros_usbpd_logger_remove,
+};
+
+module_platform_driver(cros_usbpd_logger_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index c090db2cd5be..f163476d080d 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -4,31 +4,7 @@
*
* Copyright 2019 Google LLC
*
- * There is only one attribute used for debugging, called raw.
- * You can write a hexadecimal sentence to raw, and that series of bytes
- * will be sent to the EC. Then, you can read the bytes of response
- * by reading from raw.
- *
- * For writing:
- * Bytes 0-1 indicate the message type:
- * 00 F0 = Execute Legacy Command
- * 00 F2 = Read/Write NVRAM Property
- * Byte 2 provides the command code
- * Bytes 3+ consist of the data passed in the request
- *
- * When referencing the EC interface spec, byte 2 corresponds to MBOX[0],
- * byte 3 corresponds to MBOX[1], etc.
- *
- * At least three bytes are required, for the msg type and command,
- * with additional bytes optional for additional data.
- *
- * Example:
- * // Request EC info type 3 (EC firmware build date)
- * $ echo 00 f0 38 00 03 00 > raw
- * // View the result. The decoded ASCII result "12/21/18" is
- * // included after the raw hex.
- * $ cat raw
- * 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8...
+ * See Documentation/ABI/testing/debugfs-wilco-ec for usage.
*/
#include <linux/ctype.h>
@@ -136,18 +112,15 @@ static ssize_t raw_write(struct file *file, const char __user *user_buf,
ret = parse_hex_sentence(buf, kcount, request_data, TYPE_AND_DATA_SIZE);
if (ret < 0)
return ret;
- /* Need at least two bytes for message type and one for command */
+ /* Need at least two bytes for message type and one byte of data */
if (ret < 3)
return -EINVAL;
- /* Clear response data buffer */
- memset(debug_info->raw_data, '\0', EC_MAILBOX_DATA_SIZE_EXTENDED);
-
msg.type = request_data[0] << 8 | request_data[1];
- msg.flags = WILCO_EC_FLAG_RAW;
- msg.command = request_data[2];
- msg.request_data = ret > 3 ? request_data + 3 : 0;
- msg.request_size = ret - 3;
+ msg.flags = 0;
+ msg.request_data = request_data + 2;
+ msg.request_size = ret - 2;
+ memset(debug_info->raw_data, 0, sizeof(debug_info->raw_data));
msg.response_data = debug_info->raw_data;
msg.response_size = EC_MAILBOX_DATA_SIZE;
@@ -174,7 +147,8 @@ static ssize_t raw_read(struct file *file, char __user *user_buf, size_t count,
fmt_len = hex_dump_to_buffer(debug_info->raw_data,
debug_info->response_size,
16, 1, debug_info->formatted_data,
- FORMATTED_BUFFER_SIZE, true);
+ sizeof(debug_info->formatted_data),
+ true);
/* Only return response the first time it is read */
debug_info->response_size = 0;
}
@@ -190,6 +164,51 @@ static const struct file_operations fops_raw = {
.llseek = no_llseek,
};
+#define CMD_KB_CHROME 0x88
+#define SUB_CMD_H1_GPIO 0x0A
+
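+/*
+ * The "h1_gpio" debugfs attribute reads the H1 GPIO state from the EC using
+ * the legacy CMD_KB_CHROME/SUB_CMD_H1_GPIO mailbox command and reports the
+ * returned value byte as a hex bitmask.
+ */
+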
+struct h1_gpio_status_request {
+ u8 cmd; /* Always CMD_KB_CHROME */
+ u8 reserved;
+ u8 sub_cmd; /* Always SUB_CMD_H1_GPIO */
+} __packed;
+
+struct hi_gpio_status_response {
+ u8 status; /* 0 if allowed */
+ u8 val; /* BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL */
+} __packed;
+
+static int h1_gpio_get(void *arg, u64 *val)
+{
+ struct wilco_ec_device *ec = arg;
+ struct h1_gpio_status_request rq;
+ struct hi_gpio_status_response rs;
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_KB_CHROME;
+ rq.sub_cmd = SUB_CMD_H1_GPIO;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &rq;
+ msg.request_size = sizeof(rq);
+ msg.response_data = &rs;
+ msg.response_size = sizeof(rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs.status)
+ return -EIO;
+
+ *val = rs.val;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
+
/**
* wilco_ec_debugfs_probe() - Create the debugfs node
* @pdev: The platform device, probably created in core.c
@@ -211,6 +230,8 @@ static int wilco_ec_debugfs_probe(struct platform_device *pdev)
if (!debug_info->dir)
return 0;
debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
+ debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
+ &fops_h1_gpio);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index 14355668ddfa..7fb58b487963 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -92,21 +92,10 @@ static void wilco_ec_prepare(struct wilco_ec_message *msg,
struct wilco_ec_request *rq)
{
memset(rq, 0, sizeof(*rq));
-
- /* Handle messages without trimming bytes from the request */
- if (msg->request_size && msg->flags & WILCO_EC_FLAG_RAW_REQUEST) {
- rq->reserved_raw = *(u8 *)msg->request_data;
- msg->request_size--;
- memmove(msg->request_data, msg->request_data + 1,
- msg->request_size);
- }
-
- /* Fill in request packet */
rq->struct_version = EC_MAILBOX_PROTO_VERSION;
rq->mailbox_id = msg->type;
rq->mailbox_version = EC_MAILBOX_VERSION;
- rq->data_size = msg->request_size + EC_MAILBOX_DATA_EXTRA;
- rq->command = msg->command;
+ rq->data_size = msg->request_size;
/* Checksum header and data */
rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
@@ -159,6 +148,12 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
return -EIO;
}
+ /*
+ * The EC always returns either EC_MAILBOX_DATA_SIZE or
+ * EC_MAILBOX_DATA_SIZE_EXTENDED bytes of data, so we need to
+ * calculate the checksum on **all** of this data, even if we
+ * won't use all of it.
+ */
if (msg->flags & WILCO_EC_FLAG_EXTENDED_DATA)
size = EC_MAILBOX_DATA_SIZE_EXTENDED;
else
@@ -173,33 +168,26 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
return -EBADMSG;
}
- /* Check that the EC reported success */
- msg->result = rs->result;
- if (msg->result) {
- dev_dbg(ec->dev, "bad response: 0x%02x\n", msg->result);
+ if (rs->result) {
+ dev_dbg(ec->dev, "EC reported failure: 0x%02x\n", rs->result);
return -EBADMSG;
}
- /* Check the returned data size, skipping the header */
if (rs->data_size != size) {
dev_dbg(ec->dev, "unexpected packet size (%u != %zu)",
rs->data_size, size);
return -EMSGSIZE;
}
- /* Skip 1 response data byte unless specified */
- size = (msg->flags & WILCO_EC_FLAG_RAW_RESPONSE) ? 0 : 1;
- if ((ssize_t) rs->data_size - size < msg->response_size) {
- dev_dbg(ec->dev, "response data too short (%zd < %zu)",
- (ssize_t) rs->data_size - size, msg->response_size);
+ if (rs->data_size < msg->response_size) {
+ dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)",
+ rs->data_size, msg->response_size);
return -EMSGSIZE;
}
- /* Ignore response data bytes as requested */
- memcpy(msg->response_data, rs->data + size, msg->response_size);
+ memcpy(msg->response_data, rs->data, msg->response_size);
- /* Return actual amount of data received */
- return msg->response_size;
+ return rs->data_size;
}
/**
@@ -207,10 +195,12 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
* @ec: EC device.
* @msg: EC message data for request and response.
*
- * On entry msg->type, msg->flags, msg->command, msg->request_size,
- * msg->response_size, and msg->request_data should all be filled in.
+ * On entry msg->type, msg->request_size, and msg->request_data should all be
+ * filled in. If desired, msg->flags can be set.
*
- * On exit msg->result and msg->response_data will be filled.
+ * If a response is expected, msg->response_size should be set, and
+ * msg->response_data should point to a buffer with enough space. On exit
+ * msg->response_data will be filled.
*
* Return: number of bytes received or negative error code on failure.
*/
@@ -219,9 +209,8 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
struct wilco_ec_request *rq;
int ret;
- dev_dbg(ec->dev, "cmd=%02x type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
- msg->command, msg->type, msg->flags, msg->response_size,
- msg->request_size);
+ dev_dbg(ec->dev, "type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
+ msg->type, msg->flags, msg->response_size, msg->request_size);
mutex_lock(&ec->mailbox_lock);
/* Prepare request packet */
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 321bc673c417..cef0133aa47a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -274,7 +274,8 @@ static int pin_user_pages(unsigned long first_page,
*iter_last_page_size = last_page_size;
}
- ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+ ret = get_user_pages_fast(first_page, requested_pages,
+ !is_write ? FOLL_WRITE : 0,
pages);
if (ret <= 0)
return -EFAULT;
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index cd8a90846063..530fe7e31397 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -5,7 +5,7 @@
menuconfig MELLANOX_PLATFORM
bool "Platform support for Mellanox hardware"
- depends on X86 || ARM || COMPILE_TEST
+ depends on X86 || ARM || ARM64 || COMPILE_TEST
---help---
Say Y here to get to see options for platform support for
Mellanox systems. This option alone does not add any kernel code.
@@ -34,4 +34,14 @@ config MLXREG_IO
to system resets operation, system reset causes monitoring and some
kinds of mux selection.
+config MLXBF_TMFIFO
+ tristate "Mellanox BlueField SoC TmFifo platform driver"
+ depends on ARM64
+ depends on ACPI
+ depends on VIRTIO_CONSOLE && VIRTIO_NET
+ help
+ Say y here to enable TmFifo support. The TmFifo platform driver
+ provides console and networking support over the TmFifo, based on
+ the virtio framework.
+
endif # MELLANOX_PLATFORM
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index 57074d9c722c..a229bda18fd9 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -3,5 +3,6 @@
# Makefile for linux/drivers/platform/mellanox
# Mellanox Platform-Specific Drivers
#
+obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo-regs.h b/drivers/platform/mellanox/mlxbf-tmfifo-regs.h
new file mode 100644
index 000000000000..e4f0d2eda714
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-tmfifo-regs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLXBF_TMFIFO_REGS_H__
+#define __MLXBF_TMFIFO_REGS_H__
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
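+/*
+ * Each FIFO register field below is described by the same set of macros:
+ * _SHIFT/_WIDTH give the bit position and size, _RESET_VAL the hardware
+ * default, _RMASK the right-justified mask, and _MASK the mask at the
+ * field's position in the register (the form used with FIELD_GET() in the
+ * TmFifo driver).
+ */
+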
+#define MLXBF_TMFIFO_TX_DATA 0x00
+#define MLXBF_TMFIFO_TX_STS 0x08
+#define MLXBF_TMFIFO_TX_STS__LENGTH 0x0001
+#define MLXBF_TMFIFO_TX_STS__COUNT_SHIFT 0
+#define MLXBF_TMFIFO_TX_STS__COUNT_WIDTH 9
+#define MLXBF_TMFIFO_TX_STS__COUNT_RESET_VAL 0
+#define MLXBF_TMFIFO_TX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_STS__COUNT_MASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_CTL 0x10
+#define MLXBF_TMFIFO_TX_CTL__LENGTH 0x0001
+#define MLXBF_TMFIFO_TX_CTL__LWM_SHIFT 0
+#define MLXBF_TMFIFO_TX_CTL__LWM_WIDTH 8
+#define MLXBF_TMFIFO_TX_CTL__LWM_RESET_VAL 128
+#define MLXBF_TMFIFO_TX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__LWM_MASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__HWM_SHIFT 8
+#define MLXBF_TMFIFO_TX_CTL__HWM_WIDTH 8
+#define MLXBF_TMFIFO_TX_CTL__HWM_RESET_VAL 128
+#define MLXBF_TMFIFO_TX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_TX_CTL__HWM_MASK GENMASK_ULL(15, 8)
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_SHIFT 32
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_WIDTH 9
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RESET_VAL 256
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
+#define MLXBF_TMFIFO_RX_DATA 0x00
+#define MLXBF_TMFIFO_RX_STS 0x08
+#define MLXBF_TMFIFO_RX_STS__LENGTH 0x0001
+#define MLXBF_TMFIFO_RX_STS__COUNT_SHIFT 0
+#define MLXBF_TMFIFO_RX_STS__COUNT_WIDTH 9
+#define MLXBF_TMFIFO_RX_STS__COUNT_RESET_VAL 0
+#define MLXBF_TMFIFO_RX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_STS__COUNT_MASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_CTL 0x10
+#define MLXBF_TMFIFO_RX_CTL__LENGTH 0x0001
+#define MLXBF_TMFIFO_RX_CTL__LWM_SHIFT 0
+#define MLXBF_TMFIFO_RX_CTL__LWM_WIDTH 8
+#define MLXBF_TMFIFO_RX_CTL__LWM_RESET_VAL 128
+#define MLXBF_TMFIFO_RX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__LWM_MASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__HWM_SHIFT 8
+#define MLXBF_TMFIFO_RX_CTL__HWM_WIDTH 8
+#define MLXBF_TMFIFO_RX_CTL__HWM_RESET_VAL 128
+#define MLXBF_TMFIFO_RX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
+#define MLXBF_TMFIFO_RX_CTL__HWM_MASK GENMASK_ULL(15, 8)
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_SHIFT 32
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_WIDTH 9
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RESET_VAL 256
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
+#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
+
+#endif /* !defined(__MLXBF_TMFIFO_REGS_H__) */
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
new file mode 100644
index 000000000000..9a5c9fd2dbc6
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -0,0 +1,1281 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox BlueField SoC TmFifo driver
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/virtio_config.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_ring.h>
+
+#include "mlxbf-tmfifo-regs.h"
+
+/* Vring size. */
+#define MLXBF_TMFIFO_VRING_SIZE SZ_1K
+
+/* Console Tx buffer size. */
+#define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
+
+/* Console Tx buffer reserved space. */
+#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8
+
+/* House-keeping timer interval. */
+#define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10)
+
+/* Virtual devices sharing the TM FIFO. */
+#define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1)
+
+/*
+ * Reserve 1/16 of TmFifo space, so console messages are not starved by
+ * the networking traffic.
+ */
+#define MLXBF_TMFIFO_RESERVE_RATIO 16
+
+/* Message with data needs at least two words (for header & data). */
+#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+
+struct mlxbf_tmfifo;
+
+/**
+ * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
+ * @va: virtual address of the ring
+ * @dma: dma address of the ring
+ * @vq: pointer to the virtio virtqueue
+ * @desc: current descriptor of the pending packet
+ * @desc_head: head descriptor of the pending packet
+ * @cur_len: processed length of the current descriptor
+ * @rem_len: remaining length of the pending packet
+ * @pkt_len: total length of the pending packet
+ * @next_avail: next avail descriptor id
+ * @num: vring size (number of descriptors)
+ * @align: vring alignment size
+ * @index: vring index
+ * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @fifo: pointer to the tmfifo structure
+ */
+struct mlxbf_tmfifo_vring {
+ void *va;
+ dma_addr_t dma;
+ struct virtqueue *vq;
+ struct vring_desc *desc;
+ struct vring_desc *desc_head;
+ int cur_len;
+ int rem_len;
+ u32 pkt_len;
+ u16 next_avail;
+ int num;
+ int align;
+ int index;
+ int vdev_id;
+ struct mlxbf_tmfifo *fifo;
+};
+
+/* Interrupt types. */
+enum {
+ MLXBF_TM_RX_LWM_IRQ,
+ MLXBF_TM_RX_HWM_IRQ,
+ MLXBF_TM_TX_LWM_IRQ,
+ MLXBF_TM_TX_HWM_IRQ,
+ MLXBF_TM_MAX_IRQ
+};
+
+/* Ring types (Rx & Tx). */
+enum {
+ MLXBF_TMFIFO_VRING_RX,
+ MLXBF_TMFIFO_VRING_TX,
+ MLXBF_TMFIFO_VRING_MAX
+};
+
+/**
+ * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
+ * @vdev: virtio device, in which the vdev.id.device field has the
+ * VIRTIO_ID_xxx id to distinguish the virtual device.
+ * @status: status of the device
+ * @features: supported features of the device
+ * @vrings: array of tmfifo vrings of this device
+ * @config.cons: virtual console config -
+ * selected when vdev.id.device is VIRTIO_ID_CONSOLE
+ * @config.net: virtual network config -
+ * selected when vdev.id.device is VIRTIO_ID_NET
+ * @tx_buf: tx buffer used to buffer data before writing into the FIFO
+ */
+struct mlxbf_tmfifo_vdev {
+ struct virtio_device vdev;
+ u8 status;
+ u64 features;
+ struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
+ union {
+ struct virtio_console_config cons;
+ struct virtio_net_config net;
+ } config;
+ struct circ_buf tx_buf;
+};
+
+/**
+ * mlxbf_tmfifo_irq_info - Structure of the interrupt information
+ * @fifo: pointer to the tmfifo structure
+ * @irq: interrupt number
+ * @index: index into the interrupt array
+ */
+struct mlxbf_tmfifo_irq_info {
+ struct mlxbf_tmfifo *fifo;
+ int irq;
+ int index;
+};
+
+/**
+ * mlxbf_tmfifo - Structure of the TmFifo
+ * @vdev: array of the virtual devices running over the TmFifo
+ * @lock: lock to protect the TmFifo access
+ * @rx_base: mapped register base address for the Rx FIFO
+ * @tx_base: mapped register base address for the Tx FIFO
+ * @rx_fifo_size: number of entries of the Rx FIFO
+ * @tx_fifo_size: number of entries of the Tx FIFO
+ * @pend_events: pending bits for deferred events
+ * @irq_info: interrupt information
+ * @work: work struct for deferred process
+ * @timer: background timer
+ * @vring: Tx/Rx ring
+ * @spin_lock: spin lock
+ * @is_ready: ready flag
+ */
+struct mlxbf_tmfifo {
+ struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
+ struct mutex lock; /* TmFifo lock */
+ void __iomem *rx_base;
+ void __iomem *tx_base;
+ int rx_fifo_size;
+ int tx_fifo_size;
+ unsigned long pend_events;
+ struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
+ struct work_struct work;
+ struct timer_list timer;
+ struct mlxbf_tmfifo_vring *vring[2];
+ spinlock_t spin_lock; /* spin lock */
+ bool is_ready;
+};
+
+/**
+ * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
+ * @type: message type
+ * @len: payload length in network byte order. Messages sent into the FIFO
+ * will be read by the other side as a data stream in the same byte
+ * order. The length is encoded in network order so that both sides
+ * interpret it consistently.
+ */
+struct mlxbf_tmfifo_msg_hdr {
+ u8 type;
+ __be16 len;
+ u8 unused[5];
+} __packed __aligned(sizeof(u64));
+
+/*
+ * Default MAC.
+ * This MAC address will be read from EFI persistent variable if configured.
+ * It can also be reconfigured with standard Linux tools.
+ */
+static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
+ 0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
+};
+
+/* EFI variable name of the MAC address. */
+static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
+
+/* Maximum L2 header length. */
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+
+/* Supported virtio-net features. */
+#define MLXBF_TMFIFO_NET_FEATURES \
+ (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
+ BIT_ULL(VIRTIO_NET_F_MAC))
+
+#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
+
+/* Free vrings of the FIFO device. */
+static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
+ struct mlxbf_tmfifo_vdev *tm_vdev)
+{
+ struct mlxbf_tmfifo_vring *vring;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+ if (vring->va) {
+ size = vring_size(vring->num, vring->align);
+ dma_free_coherent(tm_vdev->vdev.dev.parent, size,
+ vring->va, vring->dma);
+ vring->va = NULL;
+ if (vring->vq) {
+ vring_del_virtqueue(vring->vq);
+ vring->vq = NULL;
+ }
+ }
+ }
+}
+
+/* Allocate vrings for the FIFO. */
+static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
+ struct mlxbf_tmfifo_vdev *tm_vdev)
+{
+ struct mlxbf_tmfifo_vring *vring;
+ struct device *dev;
+ dma_addr_t dma;
+ int i, size;
+ void *va;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+ vring->fifo = fifo;
+ vring->num = MLXBF_TMFIFO_VRING_SIZE;
+ vring->align = SMP_CACHE_BYTES;
+ vring->index = i;
+ vring->vdev_id = tm_vdev->vdev.id.device;
+ dev = &tm_vdev->vdev.dev;
+
+ size = vring_size(vring->num, vring->align);
+ va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
+ if (!va) {
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ dev_err(dev->parent, "dma_alloc_coherent failed\n");
+ return -ENOMEM;
+ }
+
+ vring->va = va;
+ vring->dma = dma;
+ }
+
+ return 0;
+}
+
+/* Disable interrupts of the FIFO device. */
+static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
+{
+ int i, irq;
+
+ for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
+ irq = fifo->irq_info[i].irq;
+ fifo->irq_info[i].irq = 0;
+ disable_irq(irq);
+ }
+}
+
+/* Interrupt handler. */
+static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
+{
+ struct mlxbf_tmfifo_irq_info *irq_info = arg;
+
+ if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
+ schedule_work(&irq_info->fifo->work);
+
+ return IRQ_HANDLED;
+}
+
+/* Get the next packet descriptor from the vring. */
+static struct vring_desc *
+mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ unsigned int idx, head;
+
+ if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
+ return NULL;
+
+ idx = vring->next_avail % vr->num;
+ head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
+ if (WARN_ON(head >= vr->num))
+ return NULL;
+
+ vring->next_avail++;
+
+ return &vr->desc[head];
+}
+
+/* Release virtio descriptor. */
+static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc, u32 len)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ u16 idx, vr_idx;
+
+ vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
+ idx = vr_idx % vr->num;
+ vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
+ vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
+
+ /*
+ * Virtio could poll and check the 'idx' to decide whether the desc is
+ * done or not. Add a memory barrier here to make sure the update above
+ * completes before updating the idx.
+ */
+ mb();
+ vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
+}
+
+/* Get the total length of the descriptor chain. */
+static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = vring->vq->vdev;
+ u32 len = 0, idx;
+
+ while (desc) {
+ len += virtio32_to_cpu(vdev, desc->len);
+ if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
+ break;
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ }
+
+ return len;
+}
+
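+/*
+ * Complete the packet currently pending on this vring (if any) and reset the
+ * vring's packet-tracking state.
+ */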
+static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+{
+ struct vring_desc *desc_head;
+ u32 len = 0;
+
+ if (vring->desc_head) {
+ desc_head = vring->desc_head;
+ len = vring->pkt_len;
+ } else {
+ desc_head = mlxbf_tmfifo_get_next_desc(vring);
+ len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
+ }
+
+ if (desc_head)
+ mlxbf_tmfifo_release_desc(vring, desc_head, len);
+
+ vring->pkt_len = 0;
+ vring->desc = NULL;
+ vring->desc_head = NULL;
+}
+
+static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc, bool is_rx)
+{
+ struct virtio_device *vdev = vring->vq->vdev;
+ struct virtio_net_hdr *net_hdr;
+
+ net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+ memset(net_hdr, 0, sizeof(*net_hdr));
+}
+
+/* Get and initialize the next packet. */
+static struct vring_desc *
+mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+{
+ struct vring_desc *desc;
+
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
+ mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
+
+ vring->desc_head = desc;
+ vring->desc = desc;
+
+ return desc;
+}
+
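+/*
+ * Note: the timer below periodically sets the Rx/Tx pending bits and kicks
+ * the worker, presumably as a safety net so that traffic keeps moving even
+ * if a FIFO interrupt is missed or coalesced.
+ */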
+/* House-keeping timer. */
+static void mlxbf_tmfifo_timer(struct timer_list *t)
+{
+ struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
+ int rx, tx;
+
+ rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
+ tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
+
+ if (rx || tx)
+ schedule_work(&fifo->work);
+
+ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+}
+
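+/*
+ * Console Tx data path (as implemented below): vring descriptors are first
+ * copied into the per-device circular buffer 'tx_buf' under the spin-lock,
+ * and the worker later drains that buffer into the TmFifo in
+ * mlxbf_tmfifo_console_tx().
+ */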
+/* Copy one console packet into the output buffer. */
+static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
+ struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct virtio_device *vdev = &cons->vdev;
+ u32 len, idx, seg;
+ void *addr;
+
+ while (desc) {
+ addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+ len = virtio32_to_cpu(vdev, desc->len);
+
+ seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (len <= seg) {
+ memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
+ } else {
+ memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
+ addr += seg;
+ memcpy(cons->tx_buf.buf, addr, len - seg);
+ }
+ cons->tx_buf.head = (cons->tx_buf.head + len) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+
+ if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
+ break;
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ }
+}
+
+/* Copy console data into the output buffer. */
+static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
+ struct mlxbf_tmfifo_vring *vring)
+{
+ struct vring_desc *desc;
+ u32 len, avail;
+
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ while (desc) {
+ /* Release the packet if not enough space. */
+ len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
+ mlxbf_tmfifo_release_desc(vring, desc, len);
+ break;
+ }
+
+ mlxbf_tmfifo_console_output_one(cons, vring, desc);
+ mlxbf_tmfifo_release_desc(vring, desc, len);
+ desc = mlxbf_tmfifo_get_next_desc(vring);
+ }
+}
+
+/* Get the number of available words in Rx FIFO for receiving. */
+static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
+{
+ u64 sts;
+
+ sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
+ return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
+}
+
+/* Get the number of available words in the TmFifo for sending. */
+static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
+{
+ int tx_reserve;
+ u32 count;
+ u64 sts;
+
+ /* Reserve some room in FIFO for console messages. */
+ if (vdev_id == VIRTIO_ID_NET)
+ tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
+ else
+ tx_reserve = 1;
+
+ sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
+ count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
+ return fifo->tx_fifo_size - tx_reserve - count;
+}
+
+/* Console Tx (move data from the output buffer into the TmFifo). */
+static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
+{
+ struct mlxbf_tmfifo_msg_hdr hdr;
+ struct mlxbf_tmfifo_vdev *cons;
+ unsigned long flags;
+ int size, seg;
+ void *addr;
+ u64 data;
+
+ /* Return if not enough space available. */
+ if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
+ return;
+
+ cons = fifo->vdev[VIRTIO_ID_CONSOLE];
+ if (!cons || !cons->tx_buf.buf)
+ return;
+
+ /* Return if no data to send. */
+ size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (size == 0)
+ return;
+
+ /* Adjust the size to available space. */
+ if (size + sizeof(hdr) > avail * sizeof(u64))
+ size = avail * sizeof(u64) - sizeof(hdr);
+
+ /* Write header. */
+ hdr.type = VIRTIO_ID_CONSOLE;
+ hdr.len = htons(size);
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ /* Use spin-lock to protect the 'cons->tx_buf'. */
+ spin_lock_irqsave(&fifo->spin_lock, flags);
+
+ while (size > 0) {
+ addr = cons->tx_buf.buf + cons->tx_buf.tail;
+
+ seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE);
+ if (seg >= sizeof(u64)) {
+ memcpy(&data, addr, sizeof(u64));
+ } else {
+ memcpy(&data, addr, seg);
+ memcpy((u8 *)&data + seg, cons->tx_buf.buf,
+ sizeof(u64) - seg);
+ }
+ writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ if (size >= sizeof(u64)) {
+ cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+ size -= sizeof(u64);
+ } else {
+ cons->tx_buf.tail = (cons->tx_buf.tail + size) %
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE;
+ size = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->spin_lock, flags);
+}
+
+/* Rx/Tx one word in the descriptor buffer. */
+static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc,
+ bool is_rx, int len)
+{
+ struct virtio_device *vdev = vring->vq->vdev;
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ void *addr;
+ u64 data;
+
+ /* Get the buffer address of this desc. */
+ addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
+
+ /* Read a word from FIFO for Rx. */
+ if (is_rx)
+ data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
+
+ if (vring->cur_len + sizeof(u64) <= len) {
+ /* The whole word. */
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data, sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ vring->cur_len += sizeof(u64);
+ } else {
+ /* Leftover bytes. */
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ vring->cur_len = len;
+ }
+
+ /* Write the word into FIFO for Tx. */
+ if (!is_rx)
+ writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+}
+
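+/*
+ * Each message in the TmFifo is framed as one 64-bit header word (holding
+ * the virtio device type and the payload length in network byte order, see
+ * 'struct mlxbf_tmfifo_msg_hdr') followed by the payload transferred in
+ * 64-bit words. Zero-length headers serve as keepalives and are skipped on
+ * the Rx side.
+ */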
+/*
+ * Rx/Tx packet header.
+ *
+ * In the Rx case, the packet might belong to a different vring since the
+ * TmFifo is shared by different services. In such a case, the 'vring_change'
+ * flag is set.
+ */
+static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ struct vring_desc *desc,
+ bool is_rx, bool *vring_change)
+{
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ struct virtio_net_config *config;
+ struct mlxbf_tmfifo_msg_hdr hdr;
+ int vdev_id, hdr_len;
+
+ /* Read/Write packet header. */
+ if (is_rx) {
+ /* Drain one word from the FIFO. */
+ *(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
+
+ /* Skip the length 0 packets (keepalive). */
+ if (hdr.len == 0)
+ return;
+
+ /* Check packet type. */
+ if (hdr.type == VIRTIO_ID_NET) {
+ vdev_id = VIRTIO_ID_NET;
+ hdr_len = sizeof(struct virtio_net_hdr);
+ config = &fifo->vdev[vdev_id]->config.net;
+ if (ntohs(hdr.len) > config->mtu +
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ return;
+ } else {
+ vdev_id = VIRTIO_ID_CONSOLE;
+ hdr_len = 0;
+ }
+
+ /*
+ * Check whether the new packet still belongs to this vring.
+ * If not, update the pkt_len of the new vring.
+ */
+ if (vdev_id != vring->vdev_id) {
+ struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
+
+ if (!tm_dev2)
+ return;
+ vring->desc = desc;
+ vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
+ *vring_change = true;
+ }
+ vring->pkt_len = ntohs(hdr.len) + hdr_len;
+ } else {
+ /* Network virtio has an extra header. */
+ hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
+ sizeof(struct virtio_net_hdr) : 0;
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
+ VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
+ hdr.len = htons(vring->pkt_len - hdr_len);
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+ }
+
+ vring->cur_len = hdr_len;
+ vring->rem_len = vring->pkt_len;
+ fifo->vring[is_rx] = vring;
+}
+
+/*
+ * Rx/Tx one descriptor.
+ *
+ * Return true to indicate more data available.
+ */
+static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ bool is_rx, int *avail)
+{
+ const struct vring *vr = virtqueue_get_vring(vring->vq);
+ struct mlxbf_tmfifo *fifo = vring->fifo;
+ struct virtio_device *vdev;
+ bool vring_change = false;
+ struct vring_desc *desc;
+ unsigned long flags;
+ u32 len, idx;
+
+ vdev = &fifo->vdev[vring->vdev_id]->vdev;
+
+ /* Get the descriptor of the next packet. */
+ if (!vring->desc) {
+ desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
+ if (!desc)
+ return false;
+ } else {
+ desc = vring->desc;
+ }
+
+ /* Beginning of a packet. Start to Rx/Tx packet header. */
+ if (vring->pkt_len == 0) {
+ mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ (*avail)--;
+
+ /* Return if new packet is for another ring. */
+ if (vring_change)
+ return false;
+ goto mlxbf_tmfifo_desc_done;
+ }
+
+ /* Get the length of this desc. */
+ len = virtio32_to_cpu(vdev, desc->len);
+ if (len > vring->rem_len)
+ len = vring->rem_len;
+
+ /* Rx/Tx one word (8 bytes) if not done. */
+ if (vring->cur_len < len) {
+ mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
+ (*avail)--;
+ }
+
+ /* Check again whether it's done. */
+ if (vring->cur_len == len) {
+ vring->cur_len = 0;
+ vring->rem_len -= len;
+
+ /* Get the next desc on the chain. */
+ if (vring->rem_len > 0 &&
+ (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
+ idx = virtio16_to_cpu(vdev, desc->next);
+ desc = &vr->desc[idx];
+ goto mlxbf_tmfifo_desc_done;
+ }
+
+ /* Done and release the pending packet. */
+ mlxbf_tmfifo_release_pending_pkt(vring);
+ desc = NULL;
+ fifo->vring[is_rx] = NULL;
+
+ /* Notify upper layer that packet is done. */
+ spin_lock_irqsave(&fifo->spin_lock, flags);
+ vring_interrupt(0, vring->vq);
+ spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ }
+
+mlxbf_tmfifo_desc_done:
+ /* Save the current desc. */
+ vring->desc = desc;
+
+ return true;
+}
+
+/* Rx & Tx processing of a queue. */
+static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+{
+ int avail = 0, devid = vring->vdev_id;
+ struct mlxbf_tmfifo *fifo;
+ bool more;
+
+ fifo = vring->fifo;
+
+ /* Return if vdev is not ready. */
+ if (!fifo->vdev[devid])
+ return;
+
+ /* Return if another vring is running. */
+ if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
+ return;
+
+ /* Only handle console and network for now. */
+ if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
+ return;
+
+ do {
+ /* Get available FIFO space. */
+ if (avail == 0) {
+ if (is_rx)
+ avail = mlxbf_tmfifo_get_rx_avail(fifo);
+ else
+ avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
+ if (avail <= 0)
+ break;
+ }
+
+ /* Console output always comes from the Tx buffer. */
+ if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
+ mlxbf_tmfifo_console_tx(fifo, avail);
+ break;
+ }
+
+ /* Handle one descriptor. */
+ more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
+ } while (more);
+}
+
+/* Handle Rx or Tx queues. */
+static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
+ int irq_id, bool is_rx)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+ struct mlxbf_tmfifo_vring *vring;
+ int i;
+
+ if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
+ !fifo->irq_info[irq_id].irq)
+ return;
+
+ for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
+ tm_vdev = fifo->vdev[i];
+ if (tm_vdev) {
+ vring = &tm_vdev->vrings[queue_id];
+ if (vring->vq)
+ mlxbf_tmfifo_rxtx(vring, is_rx);
+ }
+ }
+}
+
+/* Work handler for Rx and Tx case. */
+static void mlxbf_tmfifo_work_handler(struct work_struct *work)
+{
+ struct mlxbf_tmfifo *fifo;
+
+ fifo = container_of(work, struct mlxbf_tmfifo, work);
+ if (!fifo->is_ready)
+ return;
+
+ mutex_lock(&fifo->lock);
+
+ /* Tx (Send data to the TmFifo). */
+ mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
+ MLXBF_TM_TX_LWM_IRQ, false);
+
+ /* Rx (Receive data from the TmFifo). */
+ mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
+ MLXBF_TM_RX_HWM_IRQ, true);
+
+ mutex_unlock(&fifo->lock);
+}
+
+/* The notify function is called when new buffers are posted. */
+static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
+{
+ struct mlxbf_tmfifo_vring *vring = vq->priv;
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+ struct mlxbf_tmfifo *fifo;
+ unsigned long flags;
+
+ fifo = vring->fifo;
+
+ /*
+	 * Virtio maintains vrings in pairs: even-numbered rings for Rx
+	 * and odd-numbered rings for Tx.
+ */
+ if (vring->index & BIT(0)) {
+ /*
+		 * The console can make a blocking call with interrupts
+		 * disabled. In such a case, the vring needs to be served
+		 * right away. Otherwise, just set the Tx LWM bit to start
+		 * Tx in the worker handler.
+ */
+ if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
+ spin_lock_irqsave(&fifo->spin_lock, flags);
+ tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
+ mlxbf_tmfifo_console_output(tm_vdev, vring);
+ spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
+ &fifo->pend_events)) {
+ return true;
+ }
+ } else {
+ if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
+ return true;
+ }
+
+ schedule_work(&fifo->work);
+
+ return true;
+}
+
+/* Get the array of feature bits for this device. */
+static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ return tm_vdev->features;
+}
+
+/* Confirm device features to use. */
+static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->features = vdev->features;
+
+ return 0;
+}
+
+/* Free virtqueues found by find_vqs(). */
+static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+ struct mlxbf_tmfifo_vring *vring;
+ struct virtqueue *vq;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
+ vring = &tm_vdev->vrings[i];
+
+ /* Release the pending packet. */
+ if (vring->desc)
+ mlxbf_tmfifo_release_pending_pkt(vring);
+ vq = vring->vq;
+ if (vq) {
+ vring->vq = NULL;
+ vring_del_virtqueue(vq);
+ }
+ }
+}
+
+/* Create and initialize the virtual queues. */
+static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
+ unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[],
+ const bool *ctx,
+ struct irq_affinity *desc)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+ struct mlxbf_tmfifo_vring *vring;
+ struct virtqueue *vq;
+ int i, ret, size;
+
+ if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ ret = -EINVAL;
+ goto error;
+ }
+ vring = &tm_vdev->vrings[i];
+
+ /* zero vring */
+ size = vring_size(vring->num, vring->align);
+ memset(vring->va, 0, size);
+ vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
+ false, false, vring->va,
+ mlxbf_tmfifo_virtio_notify,
+ callbacks[i], names[i]);
+ if (!vq) {
+ dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ vqs[i] = vq;
+ vring->vq = vq;
+ vq->priv = vring;
+ }
+
+ return 0;
+
+error:
+ mlxbf_tmfifo_virtio_del_vqs(vdev);
+ return ret;
+}
+
+/* Read the status byte. */
+static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ return tm_vdev->status;
+}
+
+/* Write the status byte. */
+static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
+ u8 status)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->status = status;
+}
+
+/* Reset the device. Not much here for now. */
+static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ tm_vdev->status = 0;
+}
+
+/* Read the value of a configuration field. */
+static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf,
+ unsigned int len)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ if ((u64)offset + len > sizeof(tm_vdev->config))
+ return;
+
+ memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
+}
+
+/* Write the value of a configuration field. */
+static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
+ unsigned int offset,
+ const void *buf,
+ unsigned int len)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ if ((u64)offset + len > sizeof(tm_vdev->config))
+ return;
+
+ memcpy((u8 *)&tm_vdev->config + offset, buf, len);
+}
+
+static void tmfifo_virtio_dev_release(struct device *device)
+{
+ struct virtio_device *vdev =
+ container_of(device, struct virtio_device, dev);
+ struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
+
+ kfree(tm_vdev);
+}
+
+/* Virtio config operations. */
+static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
+ .get_features = mlxbf_tmfifo_virtio_get_features,
+ .finalize_features = mlxbf_tmfifo_virtio_finalize_features,
+ .find_vqs = mlxbf_tmfifo_virtio_find_vqs,
+ .del_vqs = mlxbf_tmfifo_virtio_del_vqs,
+ .reset = mlxbf_tmfifo_virtio_reset,
+ .set_status = mlxbf_tmfifo_virtio_set_status,
+ .get_status = mlxbf_tmfifo_virtio_get_status,
+ .get = mlxbf_tmfifo_virtio_get,
+ .set = mlxbf_tmfifo_virtio_set,
+};
+
+/* Create vdev for the FIFO. */
+static int mlxbf_tmfifo_create_vdev(struct device *dev,
+ struct mlxbf_tmfifo *fifo,
+ int vdev_id, u64 features,
+ void *config, u32 size)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
+ int ret;
+
+ mutex_lock(&fifo->lock);
+
+ tm_vdev = fifo->vdev[vdev_id];
+ if (tm_vdev) {
+ dev_err(dev, "vdev %d already exists\n", vdev_id);
+ ret = -EEXIST;
+ goto fail;
+ }
+
+ tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
+ if (!tm_vdev) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ tm_vdev->vdev.id.device = vdev_id;
+ tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
+ tm_vdev->vdev.dev.parent = dev;
+ tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
+ tm_vdev->features = features;
+ if (config)
+ memcpy(&tm_vdev->config, config, size);
+
+ if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
+ dev_err(dev, "unable to allocate vring\n");
+ ret = -ENOMEM;
+ goto vdev_fail;
+ }
+
+ /* Allocate an output buffer for the console device. */
+ if (vdev_id == VIRTIO_ID_CONSOLE)
+ tm_vdev->tx_buf.buf = devm_kmalloc(dev,
+ MLXBF_TMFIFO_CON_TX_BUF_SIZE,
+ GFP_KERNEL);
+ fifo->vdev[vdev_id] = tm_vdev;
+
+ /* Register the virtio device. */
+ ret = register_virtio_device(&tm_vdev->vdev);
+ reg_dev = tm_vdev;
+ if (ret) {
+ dev_err(dev, "register_virtio_device failed\n");
+ goto vdev_fail;
+ }
+
+ mutex_unlock(&fifo->lock);
+ return 0;
+
+vdev_fail:
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ fifo->vdev[vdev_id] = NULL;
+ if (reg_dev)
+ put_device(&tm_vdev->vdev.dev);
+ else
+ kfree(tm_vdev);
+fail:
+ mutex_unlock(&fifo->lock);
+ return ret;
+}
+
+/* Delete vdev for the FIFO. */
+static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
+{
+ struct mlxbf_tmfifo_vdev *tm_vdev;
+
+ mutex_lock(&fifo->lock);
+
+ /* Unregister vdev. */
+ tm_vdev = fifo->vdev[vdev_id];
+ if (tm_vdev) {
+ unregister_virtio_device(&tm_vdev->vdev);
+ mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
+ fifo->vdev[vdev_id] = NULL;
+ }
+
+ mutex_unlock(&fifo->lock);
+
+ return 0;
+}
+
+/* Read the configured network MAC address from the EFI variable. */
+static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
+{
+ efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
+ unsigned long size = ETH_ALEN;
+ u8 buf[ETH_ALEN];
+ efi_status_t rc;
+
+ rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
+ if (rc == EFI_SUCCESS && size == ETH_ALEN)
+ ether_addr_copy(mac, buf);
+ else
+ ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
+}
+
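+/*
+ * The watermarks below set the Rx high watermark to one word and the Tx low
+ * watermark to half of the FIFO capacity, presumably so that an Rx interrupt
+ * can fire as soon as data arrives and a Tx interrupt once the FIFO drains
+ * below half full.
+ */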
+/* Set the TmFifo thresholds, which are used to trigger interrupts. */
+static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
+{
+ u64 ctl;
+
+ /* Get Tx FIFO size and set the low/high watermark. */
+ ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
+ fifo->tx_fifo_size =
+ FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
+ ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
+ fifo->tx_fifo_size / 2);
+ ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
+ fifo->tx_fifo_size - 1);
+ writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
+
+ /* Get Rx FIFO size and set the low/high watermark. */
+ ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
+ fifo->rx_fifo_size =
+ FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
+ ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
+ ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
+ FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
+ writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
+}
+
+static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
+{
+ int i;
+
+ fifo->is_ready = false;
+ del_timer_sync(&fifo->timer);
+ mlxbf_tmfifo_disable_irqs(fifo);
+ cancel_work_sync(&fifo->work);
+ for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
+ mlxbf_tmfifo_delete_vdev(fifo, i);
+}
+
+/* Probe the TMFIFO. */
+static int mlxbf_tmfifo_probe(struct platform_device *pdev)
+{
+ struct virtio_net_config net_config;
+ struct device *dev = &pdev->dev;
+ struct mlxbf_tmfifo *fifo;
+ int i, rc;
+
+ fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+
+ spin_lock_init(&fifo->spin_lock);
+ INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
+ mutex_init(&fifo->lock);
+
+ /* Get the resource of the Rx FIFO. */
+ fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fifo->rx_base))
+ return PTR_ERR(fifo->rx_base);
+
+ /* Get the resource of the Tx FIFO. */
+ fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(fifo->tx_base))
+ return PTR_ERR(fifo->tx_base);
+
+ platform_set_drvdata(pdev, fifo);
+
+ timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
+
+ for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
+ fifo->irq_info[i].index = i;
+ fifo->irq_info[i].fifo = fifo;
+ fifo->irq_info[i].irq = platform_get_irq(pdev, i);
+ rc = devm_request_irq(dev, fifo->irq_info[i].irq,
+ mlxbf_tmfifo_irq_handler, 0,
+ "tmfifo", &fifo->irq_info[i]);
+ if (rc) {
+ dev_err(dev, "devm_request_irq failed\n");
+ fifo->irq_info[i].irq = 0;
+ return rc;
+ }
+ }
+
+ mlxbf_tmfifo_set_threshold(fifo);
+
+ /* Create the console vdev. */
+ rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
+ if (rc)
+ goto fail;
+
+ /* Create the network vdev. */
+ memset(&net_config, 0, sizeof(net_config));
+ net_config.mtu = ETH_DATA_LEN;
+ net_config.status = VIRTIO_NET_S_LINK_UP;
+ mlxbf_tmfifo_get_cfg_mac(net_config.mac);
+ rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
+ MLXBF_TMFIFO_NET_FEATURES, &net_config,
+ sizeof(net_config));
+ if (rc)
+ goto fail;
+
+ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+
+ fifo->is_ready = true;
+ return 0;
+
+fail:
+ mlxbf_tmfifo_cleanup(fifo);
+ return rc;
+}
+
+/* Device remove function. */
+static int mlxbf_tmfifo_remove(struct platform_device *pdev)
+{
+ struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);
+
+ mlxbf_tmfifo_cleanup(fifo);
+
+ return 0;
+}
+
+static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
+ { "MLNXBF01", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
+
+static struct platform_driver mlxbf_tmfifo_driver = {
+ .probe = mlxbf_tmfifo_probe,
+ .remove = mlxbf_tmfifo_remove,
+ .driver = {
+ .name = "bf-tmfifo",
+ .acpi_match_table = mlxbf_tmfifo_acpi_match,
+ },
+};
+
+module_platform_driver(mlxbf_tmfifo_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mellanox Technologies");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index a1ed13183559..85b92a95e4c8 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1263,6 +1263,17 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
+config INTEL_MRFLD_PWRBTN
+ tristate "Intel Merrifield Basin Cove power button driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ depends on INPUT
+ ---help---
+	  This option adds a power button driver for the Basin Cove PMIC
+ on Intel Merrifield devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_mrfld_pwrbtn.
+
config I2C_MULTI_INSTANTIATE
tristate "I2C multi instantiate pseudo device driver"
depends on I2C && ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 86cb76677bc8..87b0069bd781 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index f10af5c383c5..83fd7677af24 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -522,23 +522,22 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
input.length = (acpi_size) sizeof(*in_args);
input.pointer = in_args;
- if (out_data != NULL) {
+ if (out_data) {
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL;
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
command, &input, &output);
- } else
+ if (ACPI_SUCCESS(status)) {
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32)obj->integer.value;
+ }
+ kfree(output.pointer);
+ } else {
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
command, &input, NULL);
-
- if (ACPI_SUCCESS(status) && out_data != NULL) {
- obj = (union acpi_object *)output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- *out_data = (u32) obj->integer.value;
}
- kfree(output.pointer);
return status;
-
}
/*
@@ -588,7 +587,7 @@ static ssize_t show_hdmi_source(struct device *dev,
return scnprintf(buf, PAGE_SIZE,
"input [gpu] unknown\n");
}
- pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ee1fa93708ec..f94691615881 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -66,10 +66,13 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_FNLOCK_TOGGLE 0x4e
#define NOTIFY_KBD_BRTUP 0xc4
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
+#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
+
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
@@ -177,6 +180,8 @@ struct asus_wmi {
struct workqueue_struct *hotplug_workqueue;
struct work_struct hotplug_work;
+ bool fnlock_locked;
+
struct asus_wmi_debug debug;
struct asus_wmi_driver *driver;
@@ -1619,6 +1624,23 @@ static int is_display_toggle(int code)
return 0;
}
+static bool asus_wmi_has_fnlock_key(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FNLOCK, &result);
+
+ return (result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
+ !(result & ASUS_WMI_FNLOCK_BIOS_DISABLED);
+}
+
+static void asus_wmi_fnlock_update(struct asus_wmi *asus)
+{
+ int mode = asus->fnlock_locked;
+
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_FNLOCK, mode, NULL);
+}
+
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
@@ -1680,6 +1702,12 @@ static void asus_wmi_notify(u32 value, void *context)
goto exit;
}
+ if (code == NOTIFY_FNLOCK_TOGGLE) {
+ asus->fnlock_locked = !asus->fnlock_locked;
+ asus_wmi_fnlock_update(asus);
+ goto exit;
+ }
+
if (is_display_toggle(code) &&
asus->driver->quirks->no_display_toggle)
goto exit;
@@ -2134,6 +2162,11 @@ static int asus_wmi_add(struct platform_device *pdev)
} else
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
+ if (asus_wmi_has_fnlock_key(asus)) {
+ asus->fnlock_locked = true;
+ asus_wmi_fnlock_update(asus);
+ }
+
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);
if (ACPI_FAILURE(status)) {
@@ -2213,6 +2246,8 @@ static int asus_hotk_resume(struct device *device)
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
+ if (asus_wmi_has_fnlock_key(asus))
+ asus_wmi_fnlock_update(asus);
return 0;
}
@@ -2249,6 +2284,8 @@ static int asus_hotk_restore(struct device *device)
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
+ if (asus_wmi_has_fnlock_key(asus))
+ asus_wmi_fnlock_update(asus);
return 0;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 95e6ca116e00..a561f653cf13 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -531,7 +531,7 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
return;
}
- dell_fill_request(&buffer, 0, 0x2, 0, 0);
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
hwswitch = buffer.output[1];
@@ -562,7 +562,7 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
return ret;
status = buffer.output[1];
- dell_fill_request(&buffer, 0, 0x2, 0, 0);
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
if (hwswitch_ret)
return hwswitch_ret;
@@ -647,7 +647,7 @@ static void dell_update_rfkill(struct work_struct *ignored)
if (ret != 0)
return;
- dell_fill_request(&buffer, 0, 0x2, 0, 0);
+ dell_fill_request(&buffer, 0x2, 0, 0, 0);
ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
if (ret == 0 && (status & BIT(0)))
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index f3afe778001e..56535d7222dd 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -18,6 +18,8 @@
#include <linux/rfkill.h>
#include <linux/input.h>
+#include "dell-rbtn.h"
+
enum rbtn_type {
RBTN_UNKNOWN,
RBTN_TOGGLE,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index c53ae86b59c7..2d94536dea88 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -980,312 +980,21 @@ static void ideapad_wmi_notify(u32 value, void *context)
#endif
/*
- * Some ideapads don't have a hardware rfkill switch, reading VPCCMD_R_RF
- * always results in 0 on these models, causing ideapad_laptop to wrongly
- * report all radios as hardware-blocked.
+ * Some ideapads have a hardware rfkill switch, but most do not have one.
+ * Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill
+ * switch, causing ideapad_laptop to wrongly report all radios as hw-blocked.
+ * There used to be a long list of DMI ids for models without a hw rfkill
+ * switch here, but that resulted in playing whack-a-mole.
+ * More importantly, wrongly reporting the wifi radio as hw-blocked results in
+ * non-working wifi, whereas not reporting it hw-blocked when it actually is
+ * hw-blocked results in an empty SSID list, which is a much more benign
+ * failure mode.
+ * So the default now is the much safer option of assuming there is no
+ * hardware rfkill switch. This default also matches most hardware, since
+ * having a hw rfkill switch is quite rare on modern hardware, so it also
+ * leads to a much shorter DMI list.
*/
-static const struct dmi_system_id no_hw_rfkill_list[] = {
- {
- .ident = "Lenovo RESCUER R720-15IKBN",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
- },
- },
- {
- .ident = "Lenovo G40-30",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
- },
- },
- {
- .ident = "Lenovo G50-30",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
- },
- },
- {
- .ident = "Lenovo V310-14IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14IKB"),
- },
- },
- {
- .ident = "Lenovo V310-14ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14ISK"),
- },
- },
- {
- .ident = "Lenovo V310-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15IKB"),
- },
- },
- {
- .ident = "Lenovo V310-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"),
- },
- },
- {
- .ident = "Lenovo V510-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V510-15IKB"),
- },
- },
- {
- .ident = "Lenovo ideapad 300-15IBR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IBR"),
- },
- },
- {
- .ident = "Lenovo ideapad 300-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IKB"),
- },
- },
- {
- .ident = "Lenovo ideapad 300S-11IBR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300S-11BR"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15ABR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ABR"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15IAP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IAP"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"),
- },
- },
- {
- .ident = "Lenovo ideapad 310-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad 330-15ICH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 330-15ICH"),
- },
- },
- {
- .ident = "Lenovo ideapad 530S-14ARR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 530S-14ARR"),
- },
- },
- {
- .ident = "Lenovo ideapad S130-14IGM",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad S130-14IGM"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-14ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-14ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-15ACZ",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ACZ"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700 Touch-15ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad Y700-17ISK",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
- },
- },
- {
- .ident = "Lenovo ideapad MIIX 720-12IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 720-12IKB"),
- },
- },
- {
- .ident = "Lenovo Legion Y520-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
- },
- },
- {
- .ident = "Lenovo Y520-15IKBM",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBM"),
- },
- },
- {
- .ident = "Lenovo Legion Y530-15ICH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"),
- },
- },
- {
- .ident = "Lenovo Legion Y530-15ICH-1060",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH-1060"),
- },
- },
- {
- .ident = "Lenovo Legion Y720-15IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKB"),
- },
- },
- {
- .ident = "Lenovo Legion Y720-15IKBN",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBN"),
- },
- },
- {
- .ident = "Lenovo Y720-15IKBM",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBM"),
- },
- },
- {
- .ident = "Lenovo Yoga 2 11 / 13 / Pro",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
- },
- },
- {
- .ident = "Lenovo Yoga 2 11 / 13 / Pro",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_BOARD_NAME, "Yoga2"),
- },
- },
- {
- .ident = "Lenovo Yoga 2 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Yoga 2 13"),
- },
- },
- {
- .ident = "Lenovo Yoga 3 1170 / 1470",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3"),
- },
- },
- {
- .ident = "Lenovo Yoga 3 Pro 1370",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
- },
- },
- {
- .ident = "Lenovo Yoga 700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
- },
- },
- {
- .ident = "Lenovo Yoga 900",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
- },
- },
- {
- .ident = "Lenovo Yoga 900",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
- },
- },
- {
- .ident = "Lenovo YOGA 910-13IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
- },
- },
- {
- .ident = "Lenovo YOGA 920-13IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"),
- },
- },
- {
- .ident = "Lenovo YOGA C930-13IKB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA C930-13IKB"),
- },
- },
- {
- .ident = "Lenovo Zhaoyang E42-80",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ZHAOYANG E42-80"),
- },
- },
+static const struct dmi_system_id hw_rfkill_list[] = {
{}
};
@@ -1311,7 +1020,7 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->cfg = cfg;
priv->adev = adev;
priv->platform_device = pdev;
- priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list);
+ priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
ret = ideapad_sysfs_init(priv);
if (ret)
diff --git a/drivers/platform/x86/intel_mrfld_pwrbtn.c b/drivers/platform/x86/intel_mrfld_pwrbtn.c
new file mode 100644
index 000000000000..d58fea51747e
--- /dev/null
+++ b/drivers/platform/x86/intel_mrfld_pwrbtn.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-button driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+
+#define BCOVE_PBSTATUS 0x27
+#define BCOVE_PBSTATUS_PBLVL BIT(4) /* 1 - release, 0 - press */
+
+static irqreturn_t mrfld_pwrbtn_interrupt(int irq, void *dev_id)
+{
+ struct input_dev *input = dev_id;
+ struct device *dev = input->dev.parent;
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int state;
+ int ret;
+
+ ret = regmap_read(regmap, BCOVE_PBSTATUS, &state);
+ if (ret)
+ return IRQ_NONE;
+
+ dev_dbg(dev, "PBSTATUS=0x%x\n", state);
+ input_report_key(input, KEY_POWER, !(state & BCOVE_PBSTATUS_PBLVL));
+ input_sync(input);
+
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_pwrbtn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct input_dev *input;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+ input->name = pdev->name;
+ input->phys = "power-button/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = dev;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ ret = input_register_device(input);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, pmic->regmap);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_pwrbtn_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ input);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pmic->regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ regmap_update_bits(pmic->regmap, BCOVE_MPBIRQ, BCOVE_PBIRQ_PBTN, 0);
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ return 0;
+}
+
+static int mrfld_pwrbtn_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ return 0;
+}
+
+static const struct platform_device_id mrfld_pwrbtn_id_table[] = {
+ { .name = "mrfld_bcove_pwrbtn" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_pwrbtn_id_table);
+
+static struct platform_driver mrfld_pwrbtn_driver = {
+ .driver = {
+ .name = "mrfld_bcove_pwrbtn",
+ },
+ .probe = mrfld_pwrbtn_probe,
+ .remove = mrfld_pwrbtn_remove,
+ .id_table = mrfld_pwrbtn_id_table,
+};
+module_platform_driver(mrfld_pwrbtn_driver);
+
+MODULE_DESCRIPTION("Power-button driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index f2c621b55f49..1d902230ba61 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -19,6 +19,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <asm/cpu_device_id.h>
@@ -828,7 +830,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
 * the platform BIOS enforces the 24MHz crystal to shutdown
* before PMC can assert SLP_S0#.
*/
-int quirk_xtal_ignore(const struct dmi_system_id *id)
+static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
struct pmc_dev *pmcdev = &pmc;
u32 value;
@@ -854,13 +856,17 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
{}
};
-static int __init pmc_core_probe(void)
+static int pmc_core_probe(struct platform_device *pdev)
{
+ static bool device_initialized;
struct pmc_dev *pmcdev = &pmc;
const struct x86_cpu_id *cpu_id;
u64 slp_s0_addr;
int err;
+ if (device_initialized)
+ return -ENODEV;
+
cpu_id = x86_match_cpu(intel_pmc_core_ids);
if (!cpu_id)
return -ENODEV;
@@ -886,30 +892,178 @@ static int __init pmc_core_probe(void)
return -ENOMEM;
mutex_init(&pmcdev->lock);
+ platform_set_drvdata(pdev, pmcdev);
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+ dmi_check_system(pmc_core_dmi_table);
err = pmc_core_dbgfs_register(pmcdev);
if (err < 0) {
- pr_warn(" debugfs register failed.\n");
+ dev_warn(&pdev->dev, "debugfs register failed.\n");
iounmap(pmcdev->regbase);
return err;
}
- dmi_check_system(pmc_core_dmi_table);
- pr_info(" initialized\n");
+ device_initialized = true;
+ dev_info(&pdev->dev, " initialized\n");
+
return 0;
}
-module_init(pmc_core_probe)
-static void __exit pmc_core_remove(void)
+static int pmc_core_remove(struct platform_device *pdev)
{
- struct pmc_dev *pmcdev = &pmc;
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
pmc_core_dbgfs_unregister(pmcdev);
+ platform_set_drvdata(pdev, NULL);
mutex_destroy(&pmcdev->lock);
iounmap(pmcdev->regbase);
+ return 0;
}
-module_exit(pmc_core_remove)
+
+#ifdef CONFIG_PM_SLEEP
+
+static bool warn_on_s0ix_failures;
+module_param(warn_on_s0ix_failures, bool, 0644);
+MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+
+static int pmc_core_suspend(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+
+ pmcdev->check_counters = false;
+
+ /* No warnings on S0ix failures */
+ if (!warn_on_s0ix_failures)
+ return 0;
+
+	/* Check if the suspend will actually use S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
+
+ /* Save PC10 residency for checking later */
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
+ return -EIO;
+
+ /* Save S0ix residency for checking later */
+ if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
+ return -EIO;
+
+ pmcdev->check_counters = true;
+ return 0;
+}
+
+static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+{
+ u64 pc10_counter;
+
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ return false;
+
+ if (pc10_counter == pmcdev->pc10_counter)
+ return true;
+
+ return false;
+}
+
+static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
+{
+ u64 s0ix_counter;
+
+ if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
+ return false;
+
+ if (s0ix_counter == pmcdev->s0ix_counter)
+ return true;
+
+ return false;
+}
+
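+/*
+ * On resume, the counters saved in pmc_core_suspend() are compared against
+ * their current values: if the S0ix counter did not advance and the PC10
+ * counter is also unchanged, the failure is attributed to PC10 entry;
+ * otherwise the SLP_S0 debug status registers are dumped to help explain
+ * why S0ix was not reached.
+ */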
+static int pmc_core_resume(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
+ int offset = pmcdev->map->slps0_dbg_offset;
+ const struct pmc_bit_map *map;
+ u32 data;
+
+ if (!pmcdev->check_counters)
+ return 0;
+
+ if (!pmc_core_is_s0ix_failed(pmcdev))
+ return 0;
+
+ if (pmc_core_is_pc10_failed(pmcdev)) {
+ /* S0ix failed because of PC10 entry failure */
+ dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
+ pmcdev->pc10_counter);
+ return 0;
+ }
+
+	/* The really interesting case - S0ix failed - let's ask the PMC why. */
+ dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
+ pmcdev->s0ix_counter);
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ while (map->name) {
+ dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ map++;
+ }
+ maps++;
+ }
+ return 0;
+}
+
+#endif
+
+static const struct dev_pm_ops pmc_core_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
+};
+
+static struct platform_driver pmc_core_driver = {
+ .driver = {
+ .name = "intel_pmc_core",
+ .pm = &pmc_core_pm_ops,
+ },
+ .probe = pmc_core_probe,
+ .remove = pmc_core_remove,
+};
+
+static struct platform_device pmc_core_device = {
+ .name = "intel_pmc_core",
+};
+
+static int __init pmc_core_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(intel_pmc_core_ids))
+ return -ENODEV;
+
+ ret = platform_driver_register(&pmc_core_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_device_register(&pmc_core_device);
+ if (ret) {
+ platform_driver_unregister(&pmc_core_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit pmc_core_exit(void)
+{
+ platform_device_unregister(&pmc_core_device);
+ platform_driver_unregister(&pmc_core_driver);
+}
+
+module_init(pmc_core_init)
+module_exit(pmc_core_exit)
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index 88d9c0653a5f..fdee5772e532 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -241,6 +241,9 @@ struct pmc_reg_map {
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
 * @mutex_lock:		mutex to complete one transaction
+ * @check_counters: On resume, check if counters are getting incremented
+ * @pc10_counter: PC10 residency counter
+ * @s0ix_counter: S0ix residency (step adjusted)
*
* pmc_dev contains info about power management controller device.
*/
@@ -253,6 +256,10 @@ struct pmc_dev {
#endif /* CONFIG_DEBUG_FS */
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
+
+ bool check_counters; /* Check for counter increments on resume */
+ u64 pc10_counter;
+ u64 s0ix_counter;
};
#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 7964ba22ef8d..55037ff258f8 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -40,14 +40,14 @@
* The ARC handles the interrupt and services it, writing optional data to
* the IPC1 registers, updates the IPC_STS response register with the status.
*/
-#define IPC_CMD 0x0
-#define IPC_CMD_MSI 0x100
+#define IPC_CMD 0x00
+#define IPC_CMD_MSI BIT(8)
#define IPC_CMD_SIZE 16
#define IPC_CMD_SUBCMD 12
#define IPC_STATUS 0x04
-#define IPC_STATUS_IRQ 0x4
-#define IPC_STATUS_ERR 0x2
-#define IPC_STATUS_BUSY 0x1
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
#define IPC_SPTR 0x08
#define IPC_DPTR 0x0C
#define IPC_WRITE_BUFFER 0x80
@@ -101,13 +101,13 @@
#define TELEM_SSRAM_SIZE 240
#define TELEM_PMC_SSRAM_OFFSET 0x1B00
#define TELEM_PUNIT_SSRAM_OFFSET 0x1A00
-#define TCO_PMC_OFFSET 0x8
-#define TCO_PMC_SIZE 0x4
+#define TCO_PMC_OFFSET 0x08
+#define TCO_PMC_SIZE 0x04
/* PMC register bit definitions */
/* PMC_CFG_REG bit masks */
-#define PMC_CFG_NO_REBOOT_MASK (1 << 4)
+#define PMC_CFG_NO_REBOOT_MASK BIT_MASK(4)
#define PMC_CFG_NO_REBOOT_EN (1 << 4)
#define PMC_CFG_NO_REBOOT_DIS (0 << 4)
@@ -131,6 +131,7 @@ static struct intel_pmc_ipc_dev {
/* punit */
struct platform_device *punit_dev;
+ unsigned int punit_res_count;
/* Telemetry */
resource_size_t telem_pmc_ssram_base;
@@ -682,7 +683,7 @@ static int ipc_create_punit_device(void)
.name = PUNIT_DEVICE_NAME,
.id = -1,
.res = punit_res_array,
- .num_res = ARRAY_SIZE(punit_res_array),
+ .num_res = ipcdev.punit_res_count,
};
pdev = platform_device_register_full(&pdevinfo);
@@ -771,13 +772,17 @@ static int ipc_create_pmc_devices(void)
if (ret) {
dev_err(ipcdev.dev, "Failed to add punit platform device\n");
platform_device_unregister(ipcdev.tco_dev);
+ return ret;
}
if (!ipcdev.telem_res_inval) {
ret = ipc_create_telemetry_device();
- if (ret)
+ if (ret) {
dev_warn(ipcdev.dev,
"Failed to add telemetry platform device\n");
+ platform_device_unregister(ipcdev.punit_dev);
+ platform_device_unregister(ipcdev.tco_dev);
+ }
}
return ret;
@@ -785,7 +790,7 @@ static int ipc_create_pmc_devices(void)
static int ipc_plat_get_res(struct platform_device *pdev)
{
- struct resource *res, *punit_res;
+ struct resource *res, *punit_res = punit_res_array;
void __iomem *addr;
int size;
@@ -800,7 +805,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- punit_res = punit_res_array;
+ ipcdev.punit_res_count = 0;
+
/* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
@@ -808,7 +814,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get res of punit BIOS data\n");
return -ENXIO;
}
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
/* This is index 1 to cover BIOS interface register */
@@ -818,42 +824,38 @@ static int ipc_plat_get_res(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- *++punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
/* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
/* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
/* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
/* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 79671927f4ef..ab7ae1950867 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_IFACE
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 48fa7573e29b..cee039f57499 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -56,6 +56,16 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
+#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
+#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
+#define MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET 0xcb
+#define MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET 0xcd
+#define MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET 0xce
+#define MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET 0xcf
+#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
+#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
+#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
@@ -72,6 +82,7 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
+#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -128,6 +139,18 @@
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
+/* Masks and default values for watchdogs */
+#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
+#define MLXPLAT_CPLD_WD2_CLEAR_MASK (GENMASK(7, 0) & ~BIT(1))
+
+#define MLXPLAT_CPLD_WD_TYPE1_TO_MASK GENMASK(7, 4)
+#define MLXPLAT_CPLD_WD_TYPE2_TO_MASK 0
+#define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
+#define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4))
+#define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
+#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
+#define MLXPLAT_CPLD_WD_MAX_DEVS 2
+
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -135,6 +158,7 @@
* @pdev_led - led platform devices
* @pdev_io_regs - register access platform devices
* @pdev_fan - FAN platform devices
+ * @pdev_wd - array of watchdog platform devices
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
@@ -143,6 +167,7 @@ struct mlxplat_priv {
struct platform_device *pdev_led;
struct platform_device *pdev_io_regs;
struct platform_device *pdev_fan;
+ struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
};
/* Regions for LPC I2C controller and LPC base register space */
@@ -1339,6 +1364,10 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(3),
},
+ {
+ .label = "conf",
+ .capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
@@ -1346,6 +1375,148 @@ static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
};
+/* Watchdog type1: hardware implementation version 1
+ * (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems).
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type1[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type1[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
+ .bit = 1,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type1[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type1,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type1),
+ .version = MLX_WDT_TYPE1,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type1,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type1),
+ .version = MLX_WDT_TYPE1,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
+/* Watchdog type2: hardware implementation version 2
+ * (all systems except MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140).
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type2[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+ .bit = 0,
+ },
+ {
+ .label = "reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .bit = 6,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type2[] = {
+ {
+ .label = "action",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+ {
+ .label = "timeout",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
+ },
+ {
+ .label = "timeleft",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+ },
+ {
+ .label = "ping",
+ .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+ .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+ .bit = 4,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
+ {
+ .data = mlxplat_mlxcpld_wd_main_regs_type2,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type2),
+ .version = MLX_WDT_TYPE2,
+ .identity = "mlx-wdt-main",
+ },
+ {
+ .data = mlxplat_mlxcpld_wd_aux_regs_type2,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type2),
+ .version = MLX_WDT_TYPE2,
+ .identity = "mlx-wdt-aux",
+ },
+};
+
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -1368,6 +1539,14 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
@@ -1411,6 +1590,16 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -1428,6 +1617,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
return true;
}
return false;
@@ -1467,6 +1657,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -1484,6 +1678,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
return true;
}
return false;
@@ -1493,6 +1688,7 @@ static const struct reg_default mlxplat_mlxcpld_regmap_default[] = {
{ MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
struct mlxplat_mlxcpld_regmap_context {
@@ -1542,6 +1738,8 @@ static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
static struct mlxreg_core_platform_data *mlxplat_led;
static struct mlxreg_core_platform_data *mlxplat_regs_io;
static struct mlxreg_core_platform_data *mlxplat_fan;
+static struct mlxreg_core_platform_data
+ *mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
@@ -1557,6 +1755,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1575,6 +1774,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1593,6 +1793,7 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1611,6 +1812,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1630,6 +1832,8 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
return 1;
};
@@ -1912,15 +2116,33 @@ static int __init mlxplat_init(void)
}
}
+ /* Add WD drivers. */
+ for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
+ if (mlxplat_wd_data[j]) {
+ mlxplat_wd_data[j]->regmap = mlxplat_hotplug->regmap;
+ priv->pdev_wd[j] = platform_device_register_resndata(
+ &mlxplat_dev->dev, "mlx-wdt",
+ j, NULL, 0,
+ mlxplat_wd_data[j],
+ sizeof(*mlxplat_wd_data[j]));
+ if (IS_ERR(priv->pdev_wd[j])) {
+ err = PTR_ERR(priv->pdev_wd[j]);
+ goto fail_platform_wd_register;
+ }
+ }
+ }
+
/* Sync registers with hardware. */
regcache_mark_dirty(mlxplat_hotplug->regmap);
err = regcache_sync(mlxplat_hotplug->regmap);
if (err)
- goto fail_platform_fan_register;
+ goto fail_platform_wd_register;
return 0;
-fail_platform_fan_register:
+fail_platform_wd_register:
+ while (--j >= 0)
+ platform_device_unregister(priv->pdev_wd[j]);
if (mlxplat_fan)
platform_device_unregister(priv->pdev_fan);
fail_platform_io_regs_register:
@@ -1946,6 +2168,8 @@ static void __exit mlxplat_exit(void)
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
int i;
+ for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--)
+ platform_device_unregister(priv->pdev_wd[i]);
if (priv->pdev_fan)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
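The watchdog registration above follows the usual register-then-unwind pattern: on failure, everything registered so far in this loop is torn down before falling through to the earlier error labels. A compact sketch of just that loop; the helper name and parameters are illustrative, not the driver's actual interface:

	static int register_wd_devices(struct device *parent,
				       struct mlxreg_core_platform_data **data,
				       struct platform_device **pdev, int count)
	{
		int i, err;

		for (i = 0; i < count; i++) {
			if (!data[i])
				continue;	/* slot unused on this system */
			pdev[i] = platform_device_register_resndata(parent,
						"mlx-wdt", i, NULL, 0, data[i],
						sizeof(*data[i]));
			if (IS_ERR(pdev[i])) {
				err = PTR_ERR(pdev[i]);
				goto fail;
			}
		}
		return 0;

	fail:
		while (--i >= 0)
			if (data[i])
				platform_device_unregister(pdev[i]);
		return err;
	}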
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 4bfbfa3f78e6..2058445fc456 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4424,14 +4424,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
return AE_OK;
}
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
resource->type);
+ return AE_CTRL_TERMINATE;
- case ACPI_RESOURCE_TYPE_END_TAG:
- return AE_OK;
}
- return AE_CTRL_TERMINATE;
}
static int sony_pic_possible_resources(struct acpi_device *device)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 726341f2b638..71cfaf26efd1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -79,7 +79,7 @@
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
-#include <linux/pci_ids.h>
+#include <linux/pci.h>
#include <linux/power_supply.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4212,7 +4212,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = true;
break;
}
- /* fallthrough to default */
+ /* fallthrough - to default */
default:
known_ev = false;
}
@@ -4501,6 +4501,74 @@ static void bluetooth_exit(void)
bluetooth_shutdown();
}
+static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ {
+ .ident = "ThinkPad E485",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KU"),
+ },
+ },
+ {
+ .ident = "ThinkPad E585",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KV"),
+ },
+ },
+ {
+ .ident = "ThinkPad A285 - 20MW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MW"),
+ },
+ },
+ {
+ .ident = "ThinkPad A285 - 20MX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MX"),
+ },
+ },
+ {
+ .ident = "ThinkPad A485 - 20MU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MU"),
+ },
+ },
+ {
+ .ident = "ThinkPad A485 - 20MV",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MV"),
+ },
+ },
+ {}
+};
+
+static const struct pci_device_id fwbug_cards_ids[] __initconst = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) },
+ {}
+};
+
+
+static int __init have_bt_fwbug(void)
+{
+ /*
+ * Some AMD-based ThinkPads have a firmware bug: calling "GBDC"
+ * leaves Bluetooth on Intel wireless cards blocked
+ */
+ if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ FW_BUG "disable bluetooth subdriver for Intel cards\n");
+ return 1;
+ } else
+ return 0;
+}
+
static int __init bluetooth_init(struct ibm_init_struct *iibm)
{
int res;
@@ -4513,7 +4581,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
- tp_features.bluetooth = hkey_handle &&
+ tp_features.bluetooth = !have_bt_fwbug() && hkey_handle &&
acpi_evalf(hkey_handle, &status, "GBDC", "qd");
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
@@ -5808,7 +5876,7 @@ static int led_set_status(const unsigned int led,
return -EPERM;
if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
(1 << led), led_sled_arg1[ledstatus]))
- rc = -EIO;
+ return -EIO;
break;
case TPACPI_LED_OLD:
/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
@@ -5832,10 +5900,10 @@ static int led_set_status(const unsigned int led,
return -EPERM;
if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
led, led_led_arg1[ledstatus]))
- rc = -EIO;
+ return -EIO;
break;
default:
- rc = -ENXIO;
+ return -ENXIO;
}
if (!rc)
@@ -6249,8 +6317,8 @@ static int thermal_get_sensor(int idx, s32 *value)
t = TP_EC_THERMAL_TMP8;
idx -= 8;
}
- /* fallthrough */
#endif
+ /* fallthrough */
case TPACPI_THERMAL_TPEC_8:
if (idx <= 7) {
if (!acpi_ec_read(t + idx, &tmp))
@@ -9890,6 +9958,37 @@ invalid:
return '\0';
}
+static void find_new_ec_fwstr(const struct dmi_header *dm, void *private)
+{
+ char *ec_fw_string = (char *) private;
+ const char *dmi_data = (const char *)dm;
+ /*
+ * ThinkPad Embedded Controller Program Table on newer models
+ *
+ * Offset | Name | Width | Description
+ * ----------------------------------------------------
+ * 0x00 | Type | BYTE | 0x8C
+ * 0x01 | Length | BYTE |
+ * 0x02 | Handle | WORD | Varies
+ * 0x04 | Signature | BYTEx6 | ASCII for "LENOVO"
+ * 0x0A | OEM struct offset | BYTE | 0x0B
+ * 0x0B | OEM struct number | BYTE | 0x07, for this structure
+ * 0x0C | OEM struct revision | BYTE | 0x01, for this format
+ * 0x0D | ECP version ID | STR ID |
+ * 0x0E | ECP release date | STR ID |
+ */
+
+ /* Return if the data structure does not match */
+ if (dm->type != 140 || dm->length < 0x0F ||
+ memcmp(dmi_data + 4, "LENOVO", 6) != 0 ||
+ dmi_data[0x0A] != 0x0B || dmi_data[0x0B] != 0x07 ||
+ dmi_data[0x0C] != 0x01)
+ return;
+
+ /* fwstr is the first 8-byte string */
+ strncpy(ec_fw_string, dmi_data + 0x0F, 8);
+}
+
/* returns 0 - probe ok, or < 0 - probe error.
* Probe ok doesn't mean thinkpad found.
* On error, kfree() cleanup on tp->* is not performed, caller must do it */
@@ -9897,7 +9996,7 @@ static int __must_check __init get_thinkpad_model_data(
struct thinkpad_id_data *tp)
{
const struct dmi_device *dev = NULL;
- char ec_fw_string[18];
+ char ec_fw_string[18] = {0};
char const *s;
char t;
@@ -9937,20 +10036,25 @@ static int __must_check __init get_thinkpad_model_data(
ec_fw_string) == 1) {
ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
+ break;
+ }
+ }
- tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
- if (!tp->ec_version_str)
- return -ENOMEM;
+ /* Newer ThinkPads have different EC program info table */
+ if (!ec_fw_string[0])
+ dmi_walk(find_new_ec_fwstr, &ec_fw_string);
- t = tpacpi_parse_fw_id(ec_fw_string,
- &tp->ec_model, &tp->ec_release);
- if (t != 'H') {
- pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
- ec_fw_string);
- pr_notice("please report this to %s\n",
- TPACPI_MAIL);
- }
- break;
+ if (ec_fw_string[0]) {
+ tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
+ if (!tp->ec_version_str)
+ return -ENOMEM;
+
+ t = tpacpi_parse_fw_id(ec_fw_string,
+ &tp->ec_model, &tp->ec_release);
+ if (t != 'H') {
+ pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n", TPACPI_MAIL);
}
}
@@ -10165,7 +10269,7 @@ MODULE_PARM_DESC(volume_mode,
module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
MODULE_PARM_DESC(volume_capabilities,
- "Selects the mixer capabilites: 0=auto, 1=volume and mute, 2=mute only");
+ "Selects the mixer capabilities: 0=auto, 1=volume and mute, 2=mute only");
module_param_named(volume_control, volume_control_allowed, bool, 0444);
MODULE_PARM_DESC(volume_control,
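The ec_fw_string handling above copies a fixed-width field out of a raw DMI record; such fields are not NUL-terminated, which is why the buffer is zero-initialized before the strncpy(). A hedged sketch of the same idea with an explicit terminator; the helper name is illustrative:

	/* Copy a fixed-width, possibly unterminated DMI field into a
	 * NUL-terminated buffer (mirrors the ec_fw_string handling above).
	 */
	static void copy_dmi_field(char *dst, size_t dst_len,
				   const u8 *rec, size_t off, size_t width)
	{
		size_t n = min(width, dst_len - 1);

		memcpy(dst, rec + off, n);
		dst[n] = '\0';
	}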
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 2d56ff7c8230..bd0856d2e825 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -249,6 +249,21 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
.properties = jumper_ezpad_6_pro_props,
};
+static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_pro_b_props,
+};
+
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -265,6 +280,23 @@ static const struct ts_dmi_data jumper_ezpad_mini3_data = {
.properties = jumper_ezpad_mini3_props,
};
+static const struct property_entry myria_my8307_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data myria_my8307_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = myria_my8307_props,
+};
+
static const struct property_entry onda_obook_20_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
@@ -674,6 +706,17 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 6 Pro B */
+ .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ },
+ },
+ {
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
@@ -691,6 +734,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Myria MY8307 */
+ .driver_data = (void *)&myria_my8307_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Complet Electro Serv"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MY8307"),
+ },
+ },
+ {
/* Onda oBook 20 Plus */
.driver_data = (void *)&onda_obook_20_plus_data,
.matches = {
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 2b686c55b717..e341cc5c0ea6 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -57,15 +57,21 @@
#define SHDW_WK_PIN(reg, cfg) ((reg) & AT91_SHDW_WKUPIS((cfg)->wkup_pin_input))
#define SHDW_RTCWK(reg, cfg) (((reg) >> ((cfg)->sr_rtcwk_shift)) & 0x1)
+#define SHDW_RTTWK(reg, cfg) (((reg) >> ((cfg)->sr_rttwk_shift)) & 0x1)
#define SHDW_RTCWKEN(cfg) (1 << ((cfg)->mr_rtcwk_shift))
+#define SHDW_RTTWKEN(cfg) (1 << ((cfg)->mr_rttwk_shift))
#define DBC_PERIOD_US(x) DIV_ROUND_UP_ULL((1000000 * (x)), \
SLOW_CLOCK_FREQ)
+#define SHDW_CFG_NOT_USED (32)
+
struct shdwc_config {
u8 wkup_pin_input;
u8 mr_rtcwk_shift;
+ u8 mr_rttwk_shift;
u8 sr_rtcwk_shift;
+ u8 sr_rttwk_shift;
};
struct shdwc {
@@ -104,6 +110,8 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
reason = "WKUP pin";
else if (SHDW_RTCWK(reg, shdw->cfg))
reason = "RTC";
+ else if (SHDW_RTTWK(reg, shdw->cfg))
+ reason = "RTT";
pr_info("AT91: Wake-Up source: %s\n", reason);
}
@@ -221,6 +229,9 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
mode |= SHDW_RTCWKEN(shdw->cfg);
+ if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
+ mode |= SHDW_RTTWKEN(shdw->cfg);
+
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
@@ -231,13 +242,27 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
static const struct shdwc_config sama5d2_shdwc_config = {
.wkup_pin_input = 0,
.mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = SHDW_CFG_NOT_USED,
.sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+};
+
+static const struct shdwc_config sam9x60_shdwc_config = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = 16,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = 4,
};
static const struct of_device_id at91_shdwc_of_match[] = {
{
.compatible = "atmel,sama5d2-shdwc",
.data = &sama5d2_shdwc_config,
+ },
+ {
+ .compatible = "microchip,sam9x60-shdwc",
+ .data = &sam9x60_shdwc_config,
}, {
/*sentinel*/
}
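The SHDW_CFG_NOT_USED sentinel marks wake-up sources a given SoC does not implement. A hedged sketch of how the MR wake-up bits could be derived from DT while skipping unused fields; the helper name is illustrative and this is not the driver's exact code path:

	static u32 shdwc_wakeup_mode(struct device_node *np,
				     const struct shdwc_config *cfg)
	{
		u32 mode = 0;

		if (of_property_read_bool(np, "atmel,wakeup-rtc-timer") &&
		    cfg->mr_rtcwk_shift != SHDW_CFG_NOT_USED)
			mode |= BIT(cfg->mr_rtcwk_shift);

		if (of_property_read_bool(np, "atmel,wakeup-rtt-timer") &&
		    cfg->mr_rttwk_shift != SHDW_CFG_NOT_USED)
			mode |= BIT(cfg->mr_rttwk_shift);

		return mode;
	}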
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 7d0d269a0837..5a6bb638c331 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -27,6 +27,7 @@
struct syscon_reboot_context {
struct regmap *map;
u32 offset;
+ u32 value;
u32 mask;
struct notifier_block restart_handler;
};
@@ -39,7 +40,7 @@ static int syscon_restart_handle(struct notifier_block *this,
restart_handler);
/* Issue the reboot */
- regmap_write(ctx->map, ctx->offset, ctx->mask);
+ regmap_update_bits(ctx->map, ctx->offset, ctx->mask, ctx->value);
mdelay(1000);
@@ -51,6 +52,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
{
struct syscon_reboot_context *ctx;
struct device *dev = &pdev->dev;
+ int mask_err, value_err;
int err;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -64,8 +66,21 @@ static int syscon_reboot_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset))
return -EINVAL;
- if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ value_err = of_property_read_u32(pdev->dev.of_node, "value", &ctx->value);
+ mask_err = of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask);
+ if (value_err && mask_err) {
+ dev_err(dev, "unable to read 'value' and 'mask'\n");
return -EINVAL;
+ }
+
+ if (value_err) {
+ /* support old binding */
+ ctx->value = ctx->mask;
+ ctx->mask = 0xFFFFFFFF;
+ } else if (mask_err) {
+ /* support value without mask */
+ ctx->mask = 0xFFFFFFFF;
+ }
ctx->restart_handler.notifier_call = syscon_restart_handle;
ctx->restart_handler.priority = 192;
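The probe change above accepts three DT combinations. A short sketch of the resulting behaviour, assuming illustrative names; it condenses the same logic into one helper:

	static int do_reboot_write(struct regmap *map, u32 offset,
				   bool have_mask, u32 mask,
				   bool have_value, u32 value)
	{
		if (!have_mask && !have_value)
			return -EINVAL;		/* neither property: reject */

		if (!have_value) {
			/* legacy binding: "mask" actually holds the value */
			value = mask;
			mask = 0xFFFFFFFF;
		} else if (!have_mask) {
			/* value without mask: write the whole word */
			mask = 0xFFFFFFFF;
		}

		return regmap_update_bits(map, offset, mask, value);
	}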
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index e901b9879e7e..26dacdab03cc 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -169,6 +169,17 @@ config BATTERY_COLLIE
Say Y to enable support for the battery on the Sharp Zaurus
SL-5500 (collie) models.
+config BATTERY_INGENIC
+ tristate "Ingenic JZ47xx SoCs battery driver"
+ depends on MIPS || COMPILE_TEST
+ depends on INGENIC_ADC
+ help
+ Choose this option if you want to monitor battery status on
+ Ingenic JZ47xx SoC based devices.
+
+ This driver can also be built as a module. If so, the module will be
+ called ingenic-battery.
+
config BATTERY_IPAQ_MICRO
tristate "iPAQ Atmel Micro ASIC battery driver"
depends on MFD_IPAQ_MICRO
@@ -475,12 +486,12 @@ config CHARGER_MANAGER
runtime and in suspend-to-RAM by waking up the system periodically
with help of suspend_again support.
-config CHARGER_LTC3651
- tristate "LTC3651 charger"
+config CHARGER_LT3651
+ tristate "Analog Devices LT3651 charger"
depends on GPIOLIB
help
- Say Y to include support for the LTC3651 battery charger which reports
- its status via GPIO lines.
+ Say Y to include support for the Analog Devices (Linear Technology)
+ LT3651 battery charger which reports its status via GPIO lines.
config CHARGER_MAX14577
tristate "Maxim MAX14577/77836 battery charger driver"
@@ -499,6 +510,13 @@ config CHARGER_DETECTOR_MAX14656
Revision 1.2 and can be found e.g. in Kindle 4/5th generation
readers and certain LG devices.
+config CHARGER_MAX77650
+ tristate "Maxim MAX77650 battery charger driver"
+ depends on MFD_MAX77650
+ help
+ Say Y to enable support for the battery charger control of MAX77650
+ PMICs.
+
config CHARGER_MAX77693
tristate "Maxim MAX77693 battery charger driver"
depends on MFD_MAX77693
@@ -660,4 +678,14 @@ config FUEL_GAUGE_SC27XX
Say Y here to enable support for fuel gauge with SC27XX
PMIC chips.
+config CHARGER_UCS1002
+ tristate "Microchip UCS1002 USB Port Power Controller"
+ depends on I2C
+ depends on OF
+ depends on REGULATOR
+ select REGMAP_I2C
+ help
+ Say Y to enable support for Microchip UCS1002 Programmable
+ USB Port Power Controller with Charger Emulation.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index b731c2a9b695..f208273f9686 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
+obj-$(CONFIG_BATTERY_INGENIC) += ingenic-battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
@@ -67,9 +68,10 @@ obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
-obj-$(CONFIG_CHARGER_LTC3651) += ltc3651-charger.o
+obj-$(CONFIG_CHARGER_LT3651) += lt3651-charger.o
obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
+obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
@@ -87,3 +89,4 @@ obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
+obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index 7b2b69916f48..f6a66979cbb5 100644
--- a/drivers/power/supply/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
@@ -508,6 +508,7 @@ int ab8500_bm_of_probe(struct device *dev,
btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
if (!btech) {
dev_warn(dev, "missing property battery-name/type\n");
+ of_node_put(battery_node);
return -EINVAL;
}
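The one-line fix above balances the reference taken when the battery node was looked up; every early return after a successful OF lookup must drop it. A generic hedged sketch of that rule; the lookup helper shown is illustrative, not necessarily the one this driver uses:

	struct device_node *battery_node;

	battery_node = of_parse_phandle(np, "battery", 0);	/* takes a reference */
	if (!battery_node)
		return -ENODEV;

	if (!of_get_property(battery_node, "stericsson,battery-type", NULL)) {
		of_node_put(battery_node);	/* drop it on the error path too */
		return -EINVAL;
	}
	/* ... */
	of_node_put(battery_node);
	return 0;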
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index f52fe77edb6f..d2b1255ee1cc 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -24,6 +24,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iio/consumer.h>
+#include <linux/workqueue.h>
#define DRVNAME "axp20x-usb-power-supply"
@@ -36,16 +37,27 @@
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
#define AXP20X_VBUS_CLIMIT_MASK 3
-#define AXP20X_VBUC_CLIMIT_900mA 0
-#define AXP20X_VBUC_CLIMIT_500mA 1
-#define AXP20X_VBUC_CLIMIT_100mA 2
-#define AXP20X_VBUC_CLIMIT_NONE 3
+#define AXP20X_VBUS_CLIMIT_900mA 0
+#define AXP20X_VBUS_CLIMIT_500mA 1
+#define AXP20X_VBUS_CLIMIT_100mA 2
+#define AXP20X_VBUS_CLIMIT_NONE 3
+
+#define AXP813_VBUS_CLIMIT_900mA 0
+#define AXP813_VBUS_CLIMIT_1500mA 1
+#define AXP813_VBUS_CLIMIT_2000mA 2
+#define AXP813_VBUS_CLIMIT_2500mA 3
#define AXP20X_ADC_EN1_VBUS_CURR BIT(2)
#define AXP20X_ADC_EN1_VBUS_VOLT BIT(3)
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+/*
+ * Note: do not raise the debounce time; we must report Vusb high within
+ * 100 ms, otherwise we get Vbus errors in musb.
+ */
+#define DEBOUNCE_TIME msecs_to_jiffies(50)
+
struct axp20x_usb_power {
struct device_node *np;
struct regmap *regmap;
@@ -53,6 +65,8 @@ struct axp20x_usb_power {
enum axp20x_variants axp20x_id;
struct iio_channel *vbus_v;
struct iio_channel *vbus_i;
+ struct delayed_work vbus_detect;
+ unsigned int old_status;
};
static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
@@ -64,6 +78,89 @@ static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static void axp20x_usb_power_poll_vbus(struct work_struct *work)
+{
+ struct axp20x_usb_power *power =
+ container_of(work, struct axp20x_usb_power, vbus_detect.work);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret)
+ goto out;
+
+ val &= (AXP20X_PWR_STATUS_VBUS_PRESENT | AXP20X_PWR_STATUS_VBUS_USED);
+ if (val != power->old_status)
+ power_supply_changed(power->supply);
+
+ power->old_status = val;
+
+out:
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+}
+
+static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
+{
+ if (power->axp20x_id >= AXP221_ID)
+ return true;
+
+ return false;
+}
+
+static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
+{
+ unsigned int v;
+ int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP20X_VBUS_CLIMIT_100mA:
+ if (power->axp20x_id == AXP221_ID)
+ *val = -1; /* No 100mA limit */
+ else
+ *val = 100000;
+ break;
+ case AXP20X_VBUS_CLIMIT_500mA:
+ *val = 500000;
+ break;
+ case AXP20X_VBUS_CLIMIT_900mA:
+ *val = 900000;
+ break;
+ case AXP20X_VBUS_CLIMIT_NONE:
+ *val = -1;
+ break;
+ }
+
+ return 0;
+}
+
+static int axp813_get_current_max(struct axp20x_usb_power *power, int *val)
+{
+ unsigned int v;
+ int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP813_VBUS_CLIMIT_900mA:
+ *val = 900000;
+ break;
+ case AXP813_VBUS_CLIMIT_1500mA:
+ *val = 1500000;
+ break;
+ case AXP813_VBUS_CLIMIT_2000mA:
+ *val = 2000000;
+ break;
+ case AXP813_VBUS_CLIMIT_2500mA:
+ *val = 2500000;
+ break;
+ }
+ return 0;
+}
+
static int axp20x_usb_power_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -102,28 +199,9 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 1700; /* 1 step = 1.7 mV */
return 0;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
- if (ret)
- return ret;
-
- switch (v & AXP20X_VBUS_CLIMIT_MASK) {
- case AXP20X_VBUC_CLIMIT_100mA:
- if (power->axp20x_id == AXP221_ID)
- val->intval = -1; /* No 100mA limit */
- else
- val->intval = 100000;
- break;
- case AXP20X_VBUC_CLIMIT_500mA:
- val->intval = 500000;
- break;
- case AXP20X_VBUC_CLIMIT_900mA:
- val->intval = 900000;
- break;
- case AXP20X_VBUC_CLIMIT_NONE:
- val->intval = -1;
- break;
- }
- return 0;
+ if (power->axp20x_id == AXP813_ID)
+ return axp813_get_current_max(power, &val->intval);
+ return axp20x_get_current_max(power, &val->intval);
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
ret = iio_read_channel_processed(power->vbus_i,
@@ -214,6 +292,31 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
+static int axp813_usb_power_set_current_max(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val;
+
+ switch (intval) {
+ case 900000:
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK,
+ AXP813_VBUS_CLIMIT_900mA);
+ case 1500000:
+ case 2000000:
+ case 2500000:
+ val = (intval - 1000000) / 500000;
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK, val);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
int intval)
{
@@ -248,6 +351,9 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
return axp20x_usb_power_set_voltage_min(power, val->intval);
case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (power->axp20x_id == AXP813_ID)
+ return axp813_usb_power_set_current_max(power,
+ val->intval);
return axp20x_usb_power_set_current_max(power, val->intval);
default:
@@ -357,6 +463,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
+ platform_set_drvdata(pdev, power);
power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
&pdev->dev);
@@ -382,7 +489,8 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
usb_power_desc = &axp20x_usb_power_desc;
irq_names = axp20x_irq_names;
} else if (power->axp20x_id == AXP221_ID ||
- power->axp20x_id == AXP223_ID) {
+ power->axp20x_id == AXP223_ID ||
+ power->axp20x_id == AXP813_ID) {
usb_power_desc = &axp22x_usb_power_desc;
irq_names = axp22x_irq_names;
} else {
@@ -415,6 +523,19 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
irq_names[i], ret);
}
+ INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+ if (axp20x_usb_vbus_needs_polling(power))
+ queue_delayed_work(system_wq, &power->vbus_detect, 0);
+
+ return 0;
+}
+
+static int axp20x_usb_power_remove(struct platform_device *pdev)
+{
+ struct axp20x_usb_power *power = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&power->vbus_detect);
+
return 0;
}
@@ -428,12 +549,16 @@ static const struct of_device_id axp20x_usb_power_match[] = {
}, {
.compatible = "x-powers,axp223-usb-power-supply",
.data = (void *)AXP223_ID,
+ }, {
+ .compatible = "x-powers,axp813-usb-power-supply",
+ .data = (void *)AXP813_ID,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
static struct platform_driver axp20x_usb_power_driver = {
.probe = axp20x_usb_power_probe,
+ .remove = axp20x_usb_power_remove,
.driver = {
.name = DRVNAME,
.of_match_table = axp20x_usb_power_match,
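The AXP813 path maps the requested microamp limit onto the register codes defined above: 900 mA keeps its own code, and the 1.5/2.0/2.5 A settings map linearly. A small hedged sketch of that arithmetic; the helper name is illustrative:

	static int axp813_climit_code(int uA)
	{
		switch (uA) {
		case 900000:
			return AXP813_VBUS_CLIMIT_900mA;	/* code 0 */
		case 1500000:
		case 2000000:
		case 2500000:
			/* 1500000 -> 1, 2000000 -> 2, 2500000 -> 3 */
			return (uA - 1000000) / 500000;
		default:
			return -EINVAL;
		}
	}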
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index f8c6da9277b3..00b961890a38 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -833,6 +833,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
pirq = platform_get_irq(info->pdev, i);
+ if (pirq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+ return pirq;
+ }
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 9ff2461820d8..368281bc0d2b 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -686,6 +686,26 @@ intr_failed:
*/
static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
+ /* ACEPC T8 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ },
+ {
+ /* ACEPC T11 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ },
+ {
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 29b3a4056865..195c18c2f426 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1612,7 +1612,8 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
di->charge_design_full = bq27xxx_battery_read_dcap(di);
}
- if (di->cache.capacity != cache.capacity)
+ if ((di->cache.capacity != cache.capacity) ||
+ (di->cache.flags != cache.flags))
power_supply_changed(di->bat);
if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 2e8db5e6de0b..a6900aa0d2ed 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1987,6 +1987,9 @@ static struct platform_driver charger_manager_driver = {
static int __init charger_manager_init(void)
{
cm_wq = create_freezable_workqueue("charger_manager");
+ if (unlikely(!cm_wq))
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
return platform_driver_register(&charger_manager_driver);
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 6887870ba32c..61d6447d1966 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -82,9 +82,9 @@ struct cpcap_battery_config {
};
struct cpcap_coulomb_counter_data {
- s32 sample; /* 24-bits */
+ s32 sample; /* 24 or 32 bits */
s32 accumulator;
- s16 offset; /* 10-bits */
+ s16 offset; /* 9 bits */
};
enum cpcap_battery_state {
@@ -213,7 +213,7 @@ static int cpcap_battery_get_current(struct cpcap_battery_ddata *ddata)
* TI or ST coulomb counter in the PMIC.
*/
static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset, u32 divider)
{
s64 acc;
@@ -224,9 +224,6 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
if (!divider)
return 0;
- sample &= 0xffffff; /* 24-bits, unsigned */
- offset &= 0x7ff; /* 10-bits, signed */
-
switch (ddata->vendor) {
case CPCAP_VENDOR_ST:
cc_lsb = 95374; /* μAms per LSB */
@@ -259,7 +256,7 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
/* 3600000μAms = 1μAh */
static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset)
{
return cpcap_battery_cc_raw_div(ddata, sample,
@@ -268,7 +265,7 @@ static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
}
static int cpcap_battery_cc_to_ua(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset)
{
return cpcap_battery_cc_raw_div(ddata, sample,
@@ -312,17 +309,19 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
/* Sample value CPCAP_REG_CCS1 & 2 */
ccd->sample = (buf[1] & 0x0fff) << 16;
ccd->sample |= buf[0];
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+ ccd->sample = sign_extend32(ccd->sample, 23);
/* Accumulator value CPCAP_REG_CCA1 & 2 */
ccd->accumulator = ((s16)buf[3]) << 16;
ccd->accumulator |= buf[2];
- /* Offset value CPCAP_REG_CCO */
- ccd->offset = buf[5];
-
- /* Adjust offset based on mode value CPCAP_REG_CCM? */
- if (buf[4] >= 0x200)
- ccd->offset |= 0xfc00;
+ /*
+ * Coulomb counter calibration offset is CPCAP_REG_CCM,
+ * REG_CCO seems unused
+ */
+ ccd->offset = buf[4];
+ ccd->offset = sign_extend32(ccd->offset, 9);
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
@@ -477,11 +476,11 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = ddata->config.info.voltage_min_design;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
- if (cached) {
+ sample = latest->cc.sample - previous->cc.sample;
+ if (!sample) {
val->intval = cpcap_battery_cc_get_avg_current(ddata);
break;
}
- sample = latest->cc.sample - previous->cc.sample;
accumulator = latest->cc.accumulator - previous->cc.accumulator;
val->intval = cpcap_battery_cc_to_ua(ddata, sample,
accumulator,
@@ -498,13 +497,13 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = div64_s64(tmp, 100);
break;
case POWER_SUPPLY_PROP_POWER_AVG:
- if (cached) {
+ sample = latest->cc.sample - previous->cc.sample;
+ if (!sample) {
tmp = cpcap_battery_cc_get_avg_current(ddata);
tmp *= (latest->voltage / 10000);
val->intval = div64_s64(tmp, 100);
break;
}
- sample = latest->cc.sample - previous->cc.sample;
accumulator = latest->cc.accumulator - previous->cc.accumulator;
tmp = cpcap_battery_cc_to_ua(ddata, sample, accumulator,
latest->cc.offset);
@@ -562,11 +561,11 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
switch (d->action) {
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
- if (latest->counter_uah >= 0)
+ if (latest->current_ua >= 0)
dev_warn(ddata->dev, "Battery low at 3.3V!\n");
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->counter_uah >= 0) {
+ if (latest->current_ua >= 0) {
dev_emerg(ddata->dev,
"Battery empty at 3.1V, powering off\n");
orderly_poweroff(true);
@@ -670,8 +669,9 @@ static int cpcap_battery_init_iio(struct cpcap_battery_ddata *ddata)
return 0;
out_err:
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
+ if (error != -EPROBE_DEFER)
+ dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+ error);
return error;
}
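sign_extend32(value, index) treats bit 'index' as the sign bit, so an (index + 1)-bit two's-complement field is widened to s32. A minimal sketch matching the hunks above, assuming buf[] holds the raw CCS/CCM register words as in the driver:

	u32 raw_sample = ((buf[1] & 0x0fff) << 16) | buf[0];
	s32 sample = sign_extend32(raw_sample, 23);	/* 24-bit TI sample */
	s32 offset = sign_extend32(buf[4], 9);		/* calibration offset, sign bit 9 */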
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index c3ed7b476676..b4781b5d1e10 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -574,8 +574,9 @@ static int cpcap_charger_init_iio(struct cpcap_charger_ddata *ddata)
return 0;
out_err:
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
+ if (error != -EPROBE_DEFER)
+ dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+ error);
return error;
}
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 7e4f11d5a230..f99e8f1eef23 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -29,11 +29,13 @@
struct gpio_charger {
unsigned int irq;
+ unsigned int charge_status_irq;
bool wakeup_enabled;
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct gpio_desc *gpiod;
+ struct gpio_desc *charge_status;
};
static irqreturn_t gpio_charger_irq(int irq, void *devid)
@@ -59,6 +61,12 @@ static int gpio_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
val->intval = gpiod_get_value_cansleep(gpio_charger->gpiod);
break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (gpiod_get_value_cansleep(gpio_charger->charge_status))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
default:
return -EINVAL;
}
@@ -93,8 +101,29 @@ static enum power_supply_type gpio_charger_get_type(struct device *dev)
return POWER_SUPPLY_TYPE_UNKNOWN;
}
+static int gpio_charger_get_irq(struct device *dev, void *dev_id,
+ struct gpio_desc *gpio)
+{
+ int ret, irq = gpiod_to_irq(gpio);
+
+ if (irq > 0) {
+ ret = devm_request_any_context_irq(dev, irq, gpio_charger_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ dev_name(dev),
+ dev_id);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to request irq: %d\n", ret);
+ irq = 0;
+ }
+ }
+
+ return irq;
+}
+
static enum power_supply_property gpio_charger_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS /* Must always be last in the array. */
};
static int gpio_charger_probe(struct platform_device *pdev)
@@ -104,8 +133,10 @@ static int gpio_charger_probe(struct platform_device *pdev)
struct power_supply_config psy_cfg = {};
struct gpio_charger *gpio_charger;
struct power_supply_desc *charger_desc;
+ struct gpio_desc *charge_status;
+ int charge_status_irq;
unsigned long flags;
- int irq, ret;
+ int ret;
if (!pdata && !dev->of_node) {
dev_err(dev, "No platform data\n");
@@ -151,9 +182,17 @@ static int gpio_charger_probe(struct platform_device *pdev)
return PTR_ERR(gpio_charger->gpiod);
}
+ charge_status = devm_gpiod_get_optional(dev, "charge-status", GPIOD_IN);
+ gpio_charger->charge_status = charge_status;
+ if (IS_ERR(gpio_charger->charge_status))
+ return PTR_ERR(gpio_charger->charge_status);
+
charger_desc = &gpio_charger->charger_desc;
charger_desc->properties = gpio_charger_properties;
charger_desc->num_properties = ARRAY_SIZE(gpio_charger_properties);
+ /* Remove POWER_SUPPLY_PROP_STATUS from the supported properties. */
+ if (!gpio_charger->charge_status)
+ charger_desc->num_properties -= 1;
charger_desc->get_property = gpio_charger_get_property;
psy_cfg.of_node = dev->of_node;
@@ -180,16 +219,12 @@ static int gpio_charger_probe(struct platform_device *pdev)
return ret;
}
- irq = gpiod_to_irq(gpio_charger->gpiod);
- if (irq > 0) {
- ret = devm_request_any_context_irq(dev, irq, gpio_charger_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(dev), gpio_charger->charger);
- if (ret < 0)
- dev_warn(dev, "Failed to request irq: %d\n", ret);
- else
- gpio_charger->irq = irq;
- }
+ gpio_charger->irq = gpio_charger_get_irq(dev, gpio_charger->charger,
+ gpio_charger->gpiod);
+
+ charge_status_irq = gpio_charger_get_irq(dev, gpio_charger->charger,
+ gpio_charger->charge_status);
+ gpio_charger->charge_status_irq = charge_status_irq;
platform_set_drvdata(pdev, gpio_charger);
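Keeping the optional property last in the array lets the driver simply shrink num_properties when the matching GPIO is absent, as the hunk above does. A hedged sketch of that trick with illustrative names:

	static enum power_supply_property props[] = {
		POWER_SUPPLY_PROP_ONLINE,
		POWER_SUPPLY_PROP_STATUS,	/* optional, must stay last */
	};

	desc->properties = props;
	desc->num_properties = ARRAY_SIZE(props);
	if (!charger->charge_status)
		desc->num_properties -= 1;	/* no charge-status GPIO: drop STATUS */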
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
new file mode 100644
index 000000000000..35816d4b3012
--- /dev/null
+++ b/drivers/power/supply/ingenic-battery.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Battery driver for the Ingenic JZ47xx SoCs
+ * Copyright (c) 2019 Artur Rojek <contact@artur-rojek.eu>
+ *
+ * based on drivers/power/supply/jz4740-battery.c
+ */
+
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+
+struct ingenic_battery {
+ struct device *dev;
+ struct iio_channel *channel;
+ struct power_supply_desc desc;
+ struct power_supply *battery;
+ struct power_supply_battery_info info;
+};
+
+static int ingenic_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ingenic_battery *bat = power_supply_get_drvdata(psy);
+ struct power_supply_battery_info *info = &bat->info;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = iio_read_channel_processed(bat->channel, &val->intval);
+ val->intval *= 1000;
+ if (val->intval < info->voltage_min_design_uv)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (val->intval > info->voltage_max_design_uv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return ret;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = iio_read_channel_processed(bat->channel, &val->intval);
+ val->intval *= 1000;
+ return ret;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = info->voltage_min_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = info->voltage_max_design_uv;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Set the most appropriate IIO channel voltage reference scale
+ * based on the battery's max voltage.
+ */
+static int ingenic_battery_set_scale(struct ingenic_battery *bat)
+{
+ const int *scale_raw;
+ int scale_len, scale_type, best_idx = -1, best_mV, max_raw, i, ret;
+ u64 max_mV;
+
+ ret = iio_read_max_channel_raw(bat->channel, &max_raw);
+ if (ret) {
+ dev_err(bat->dev, "Unable to read max raw channel value\n");
+ return ret;
+ }
+
+ ret = iio_read_avail_channel_attribute(bat->channel, &scale_raw,
+ &scale_type, &scale_len,
+ IIO_CHAN_INFO_SCALE);
+ if (ret < 0) {
+ dev_err(bat->dev, "Unable to read channel avail scale\n");
+ return ret;
+ }
+ if (ret != IIO_AVAIL_LIST || scale_type != IIO_VAL_FRACTIONAL_LOG2)
+ return -EINVAL;
+
+ max_mV = bat->info.voltage_max_design_uv / 1000;
+
+ for (i = 0; i < scale_len; i += 2) {
+ u64 scale_mV = (max_raw * scale_raw[i]) >> scale_raw[i + 1];
+
+ if (scale_mV < max_mV)
+ continue;
+
+ if (best_idx >= 0 && scale_mV > best_mV)
+ continue;
+
+ best_mV = scale_mV;
+ best_idx = i;
+ }
+
+ if (best_idx < 0) {
+ dev_err(bat->dev, "Unable to find matching voltage scale\n");
+ return -EINVAL;
+ }
+
+ return iio_write_channel_attribute(bat->channel,
+ scale_raw[best_idx],
+ scale_raw[best_idx + 1],
+ IIO_CHAN_INFO_SCALE);
+}
+
+static enum power_supply_property ingenic_battery_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+};
+
+static int ingenic_battery_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_battery *bat;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply_desc *desc;
+ int ret;
+
+ bat = devm_kzalloc(dev, sizeof(*bat), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ bat->dev = dev;
+ bat->channel = devm_iio_channel_get(dev, "battery");
+ if (IS_ERR(bat->channel))
+ return PTR_ERR(bat->channel);
+
+ desc = &bat->desc;
+ desc->name = "jz-battery";
+ desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ desc->properties = ingenic_battery_properties;
+ desc->num_properties = ARRAY_SIZE(ingenic_battery_properties);
+ desc->get_property = ingenic_battery_get_property;
+ psy_cfg.drv_data = bat;
+ psy_cfg.of_node = dev->of_node;
+
+ bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
+ if (IS_ERR(bat->battery)) {
+ dev_err(dev, "Unable to register battery\n");
+ return PTR_ERR(bat->battery);
+ }
+
+ ret = power_supply_get_battery_info(bat->battery, &bat->info);
+ if (ret) {
+ dev_err(dev, "Unable to get battery info: %d\n", ret);
+ return ret;
+ }
+ if (bat->info.voltage_min_design_uv < 0) {
+ dev_err(dev, "Unable to get voltage min design\n");
+ return bat->info.voltage_min_design_uv;
+ }
+ if (bat->info.voltage_max_design_uv < 0) {
+ dev_err(dev, "Unable to get voltage max design\n");
+ return bat->info.voltage_max_design_uv;
+ }
+
+ return ingenic_battery_set_scale(bat);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ingenic_battery_of_match[] = {
+ { .compatible = "ingenic,jz4740-battery", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ingenic_battery_of_match);
+#endif
+
+static struct platform_driver ingenic_battery_driver = {
+ .driver = {
+ .name = "ingenic-battery",
+ .of_match_table = of_match_ptr(ingenic_battery_of_match),
+ },
+ .probe = ingenic_battery_probe,
+};
+module_platform_driver(ingenic_battery_driver);
+
+MODULE_DESCRIPTION("Battery driver for Ingenic JZ47xx SoCs");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_LICENSE("GPL");
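ingenic_battery_set_scale() above picks the smallest available scale whose full-scale voltage still covers the battery's design maximum. With IIO_VAL_FRACTIONAL_LOG2 pairs, the scale is scale_raw[i] / 2^scale_raw[i + 1], so the full-scale millivolts are computed as in this hedged sketch; the numbers in the example are assumed, not taken from a datasheet:

	/* Full-scale millivolts for one FRACTIONAL_LOG2 scale pair. */
	static u64 full_scale_mV(int max_raw, int scale_val, int scale_shift)
	{
		return ((u64)max_raw * scale_val) >> scale_shift;
	}

	/* Example: full_scale_mV(4095, 3300, 12) == 3299, so that pair is only
	 * selected when voltage_max_design_uv is at or below roughly 3.3 V.
	 */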
diff --git a/drivers/power/supply/ltc3651-charger.c b/drivers/power/supply/lt3651-charger.c
index eea63ff211c4..8de500ffad95 100644
--- a/drivers/power/supply/ltc3651-charger.c
+++ b/drivers/power/supply/lt3651-charger.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
+ * Driver for Analog Devices (Linear Technology) LT3651 charger IC.
* Copyright (C) 2017, Topic Embedded Products
- * Driver for LTC3651 charger IC.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/device.h>
@@ -19,7 +15,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-struct ltc3651_charger {
+struct lt3651_charger {
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct gpio_desc *acpr_gpio;
@@ -27,7 +23,7 @@ struct ltc3651_charger {
struct gpio_desc *chrg_gpio;
};
-static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
+static irqreturn_t lt3651_charger_irq(int irq, void *devid)
{
struct power_supply *charger = devid;
@@ -36,37 +32,37 @@ static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static inline struct ltc3651_charger *psy_to_ltc3651_charger(
+static inline struct lt3651_charger *psy_to_lt3651_charger(
struct power_supply *psy)
{
return power_supply_get_drvdata(psy);
}
-static int ltc3651_charger_get_property(struct power_supply *psy,
+static int lt3651_charger_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
- struct ltc3651_charger *ltc3651_charger = psy_to_ltc3651_charger(psy);
+ struct lt3651_charger *lt3651_charger = psy_to_lt3651_charger(psy);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (!ltc3651_charger->chrg_gpio) {
+ if (!lt3651_charger->chrg_gpio) {
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
break;
}
- if (gpiod_get_value(ltc3651_charger->chrg_gpio))
+ if (gpiod_get_value(lt3651_charger->chrg_gpio))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = gpiod_get_value(ltc3651_charger->acpr_gpio);
+ val->intval = gpiod_get_value(lt3651_charger->acpr_gpio);
break;
case POWER_SUPPLY_PROP_HEALTH:
- if (!ltc3651_charger->fault_gpio) {
+ if (!lt3651_charger->fault_gpio) {
val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
break;
}
- if (!gpiod_get_value(ltc3651_charger->fault_gpio)) {
+ if (!gpiod_get_value(lt3651_charger->fault_gpio)) {
val->intval = POWER_SUPPLY_HEALTH_GOOD;
break;
}
@@ -74,11 +70,11 @@ static int ltc3651_charger_get_property(struct power_supply *psy,
* If the fault pin is active, the chrg pin explains the type
* of failure.
*/
- if (!ltc3651_charger->chrg_gpio) {
+ if (!lt3651_charger->chrg_gpio) {
val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
}
- val->intval = gpiod_get_value(ltc3651_charger->chrg_gpio) ?
+ val->intval = gpiod_get_value(lt3651_charger->chrg_gpio) ?
POWER_SUPPLY_HEALTH_OVERHEAT :
POWER_SUPPLY_HEALTH_DEAD;
break;
@@ -89,59 +85,59 @@ static int ltc3651_charger_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property ltc3651_charger_properties[] = {
+static enum power_supply_property lt3651_charger_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
};
-static int ltc3651_charger_probe(struct platform_device *pdev)
+static int lt3651_charger_probe(struct platform_device *pdev)
{
struct power_supply_config psy_cfg = {};
- struct ltc3651_charger *ltc3651_charger;
+ struct lt3651_charger *lt3651_charger;
struct power_supply_desc *charger_desc;
int ret;
- ltc3651_charger = devm_kzalloc(&pdev->dev, sizeof(*ltc3651_charger),
+ lt3651_charger = devm_kzalloc(&pdev->dev, sizeof(*lt3651_charger),
GFP_KERNEL);
- if (!ltc3651_charger)
+ if (!lt3651_charger)
return -ENOMEM;
- ltc3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
+ lt3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
"lltc,acpr", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->acpr_gpio)) {
- ret = PTR_ERR(ltc3651_charger->acpr_gpio);
+ if (IS_ERR(lt3651_charger->acpr_gpio)) {
+ ret = PTR_ERR(lt3651_charger->acpr_gpio);
dev_err(&pdev->dev, "Failed to acquire acpr GPIO: %d\n", ret);
return ret;
}
- ltc3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
+ lt3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
"lltc,fault", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->fault_gpio)) {
- ret = PTR_ERR(ltc3651_charger->fault_gpio);
+ if (IS_ERR(lt3651_charger->fault_gpio)) {
+ ret = PTR_ERR(lt3651_charger->fault_gpio);
dev_err(&pdev->dev, "Failed to acquire fault GPIO: %d\n", ret);
return ret;
}
- ltc3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
+ lt3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
"lltc,chrg", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->chrg_gpio)) {
- ret = PTR_ERR(ltc3651_charger->chrg_gpio);
+ if (IS_ERR(lt3651_charger->chrg_gpio)) {
+ ret = PTR_ERR(lt3651_charger->chrg_gpio);
dev_err(&pdev->dev, "Failed to acquire chrg GPIO: %d\n", ret);
return ret;
}
- charger_desc = &ltc3651_charger->charger_desc;
+ charger_desc = &lt3651_charger->charger_desc;
charger_desc->name = pdev->dev.of_node->name;
charger_desc->type = POWER_SUPPLY_TYPE_MAINS;
- charger_desc->properties = ltc3651_charger_properties;
- charger_desc->num_properties = ARRAY_SIZE(ltc3651_charger_properties);
- charger_desc->get_property = ltc3651_charger_get_property;
+ charger_desc->properties = lt3651_charger_properties;
+ charger_desc->num_properties = ARRAY_SIZE(lt3651_charger_properties);
+ charger_desc->get_property = lt3651_charger_get_property;
psy_cfg.of_node = pdev->dev.of_node;
- psy_cfg.drv_data = ltc3651_charger;
+ psy_cfg.drv_data = lt3651_charger;
- ltc3651_charger->charger = devm_power_supply_register(&pdev->dev,
+ lt3651_charger->charger = devm_power_supply_register(&pdev->dev,
charger_desc, &psy_cfg);
- if (IS_ERR(ltc3651_charger->charger)) {
- ret = PTR_ERR(ltc3651_charger->charger);
+ if (IS_ERR(lt3651_charger->charger)) {
+ ret = PTR_ERR(lt3651_charger->charger);
dev_err(&pdev->dev, "Failed to register power supply: %d\n",
ret);
return ret;
@@ -152,59 +148,60 @@ static int ltc3651_charger_probe(struct platform_device *pdev)
* support IRQs on these pins, userspace will have to poll the sysfs
* files manually.
*/
- if (ltc3651_charger->acpr_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->acpr_gpio);
+ if (lt3651_charger->acpr_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->acpr_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request acpr irq\n");
}
- if (ltc3651_charger->fault_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->fault_gpio);
+ if (lt3651_charger->fault_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->fault_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request fault irq\n");
}
- if (ltc3651_charger->chrg_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->chrg_gpio);
+ if (lt3651_charger->chrg_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->chrg_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request chrg irq\n");
}
- platform_set_drvdata(pdev, ltc3651_charger);
+ platform_set_drvdata(pdev, lt3651_charger);
return 0;
}
-static const struct of_device_id ltc3651_charger_match[] = {
- { .compatible = "lltc,ltc3651-charger" },
+static const struct of_device_id lt3651_charger_match[] = {
+ { .compatible = "lltc,ltc3651-charger" }, /* DEPRECATED */
+ { .compatible = "lltc,lt3651-charger" },
{ }
};
-MODULE_DEVICE_TABLE(of, ltc3651_charger_match);
+MODULE_DEVICE_TABLE(of, lt3651_charger_match);
-static struct platform_driver ltc3651_charger_driver = {
- .probe = ltc3651_charger_probe,
+static struct platform_driver lt3651_charger_driver = {
+ .probe = lt3651_charger_probe,
.driver = {
- .name = "ltc3651-charger",
- .of_match_table = ltc3651_charger_match,
+ .name = "lt3651-charger",
+ .of_match_table = lt3651_charger_match,
},
};
-module_platform_driver(ltc3651_charger_driver);
+module_platform_driver(lt3651_charger_driver);
MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
-MODULE_DESCRIPTION("Driver for LTC3651 charger");
+MODULE_DESCRIPTION("Driver for LT3651 charger");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ltc3651-charger");
+MODULE_ALIAS("platform:lt3651-charger");
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index b91b1d2999dc..9e6472834e37 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -240,6 +240,14 @@ static enum power_supply_property max14656_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static void stop_irq_work(void *data)
+{
+ struct max14656_chip *chip = data;
+
+ cancel_delayed_work_sync(&chip->irq_work);
+}
+
+
static int max14656_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -278,7 +286,19 @@ static int max14656_probe(struct i2c_client *client,
if (ret)
return -ENODEV;
+ chip->detect_psy = devm_power_supply_register(dev,
+ &chip->psy_desc, &psy_cfg);
+ if (IS_ERR(chip->detect_psy)) {
+ dev_err(dev, "power_supply_register failed\n");
+ return -EINVAL;
+ }
+
INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
+ ret = devm_add_action(dev, stop_irq_work, chip);
+ if (ret) {
+ dev_err(dev, "devm_add_action %d failed\n", ret);
+ return ret;
+ }
ret = devm_request_irq(dev, chip->irq, max14656_irq,
IRQF_TRIGGER_FALLING,
@@ -289,13 +309,6 @@ static int max14656_probe(struct i2c_client *client,
}
enable_irq_wake(chip->irq);
- chip->detect_psy = devm_power_supply_register(dev,
- &chip->psy_desc, &psy_cfg);
- if (IS_ERR(chip->detect_psy)) {
- dev_err(dev, "power_supply_register failed\n");
- return -EINVAL;
- }
-
schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
return 0;
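
The max14656 hunks reorder probe so the power supply is registered and a cancel-work cleanup action is installed before the interrupt is requested; because devm releases resources in reverse order of registration, the IRQ is freed first on unbind, then any pending work is cancelled, and only afterwards is the earlier-registered supply torn down. A minimal sketch of that devm cleanup pattern follows; the demo_* names are hypothetical and not taken from the driver.

#include <linux/device.h>
#include <linux/workqueue.h>

struct demo_chip {
	struct delayed_work work;
};

static void demo_stop_work(void *data)
{
	struct demo_chip *chip = data;

	cancel_delayed_work_sync(&chip->work);
}

static int demo_register_cleanup(struct device *dev, struct demo_chip *chip)
{
	/*
	 * Registered before the IRQ is requested: on unbind, devm releases
	 * resources in reverse order, so the IRQ is freed first, then this
	 * action cancels any pending work, and only then is the
	 * earlier-registered power supply unregistered.
	 */
	return devm_add_action(dev, demo_stop_work, chip);
}
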
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
new file mode 100644
index 000000000000..e34714cb05ec
--- /dev/null
+++ b/drivers/power/supply/max77650-charger.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// Battery charger driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#define MAX77650_CHARGER_ENABLED BIT(0)
+#define MAX77650_CHARGER_DISABLED 0x00
+#define MAX77650_CHARGER_CHG_EN_MASK BIT(0)
+
+#define MAX77650_CHG_DETAILS_MASK GENMASK(7, 4)
+#define MAX77650_CHG_DETAILS_BITS(_reg) \
+ (((_reg) & MAX77650_CHG_DETAILS_MASK) >> 4)
+
+/* Charger is OFF. */
+#define MAX77650_CHG_OFF 0x00
+/* Charger is in prequalification mode. */
+#define MAX77650_CHG_PREQ 0x01
+/* Charger is in fast-charge constant current mode. */
+#define MAX77650_CHG_ON_CURR 0x02
+/* Charger is in JEITA modified fast-charge constant-current mode. */
+#define MAX77650_CHG_ON_CURR_JEITA 0x03
+/* Charger is in fast-charge constant-voltage mode. */
+#define MAX77650_CHG_ON_VOLT 0x04
+/* Charger is in JEITA modified fast-charge constant-voltage mode. */
+#define MAX77650_CHG_ON_VOLT_JEITA 0x05
+/* Charger is in top-off mode. */
+#define MAX77650_CHG_ON_TOPOFF 0x06
+/* Charger is in JEITA modified top-off mode. */
+#define MAX77650_CHG_ON_TOPOFF_JEITA 0x07
+/* Charger is done. */
+#define MAX77650_CHG_DONE 0x08
+/* Charger is JEITA modified done. */
+#define MAX77650_CHG_DONE_JEITA 0x09
+/* Charger is suspended due to a prequalification timer fault. */
+#define MAX77650_CHG_SUSP_PREQ_TIM_FAULT 0x0a
+/* Charger is suspended due to a fast-charge timer fault. */
+#define MAX77650_CHG_SUSP_FAST_CHG_TIM_FAULT 0x0b
+/* Charger is suspended due to a battery temperature fault. */
+#define MAX77650_CHG_SUSP_BATT_TEMP_FAULT 0x0c
+
+#define MAX77650_CHGIN_DETAILS_MASK GENMASK(3, 2)
+#define MAX77650_CHGIN_DETAILS_BITS(_reg) \
+ (((_reg) & MAX77650_CHGIN_DETAILS_MASK) >> 2)
+
+#define MAX77650_CHGIN_UNDERVOLTAGE_LOCKOUT 0x00
+#define MAX77650_CHGIN_OVERVOLTAGE_LOCKOUT 0x01
+#define MAX77650_CHGIN_OKAY 0x11
+
+#define MAX77650_CHARGER_CHG_MASK BIT(1)
+#define MAX77650_CHARGER_CHG_CHARGING(_reg) \
+ (((_reg) & MAX77650_CHARGER_CHG_MASK) > 1)
+
+#define MAX77650_CHARGER_VCHGIN_MIN_MASK 0xc0
+#define MAX77650_CHARGER_VCHGIN_MIN_SHIFT(_val) ((_val) << 5)
+
+#define MAX77650_CHARGER_ICHGIN_LIM_MASK 0x1c
+#define MAX77650_CHARGER_ICHGIN_LIM_SHIFT(_val) ((_val) << 2)
+
+struct max77650_charger_data {
+ struct regmap *map;
+ struct device *dev;
+};
+
+static enum power_supply_property max77650_charger_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_TYPE
+};
+
+static const unsigned int max77650_charger_vchgin_min_table[] = {
+ 4000000, 4100000, 4200000, 4300000, 4400000, 4500000, 4600000, 4700000
+};
+
+static const unsigned int max77650_charger_ichgin_lim_table[] = {
+ 95000, 190000, 285000, 380000, 475000
+};
+
+static int max77650_charger_set_vchgin_min(struct max77650_charger_data *chg,
+ unsigned int val)
+{
+ int i, rv;
+
+ for (i = 0; i < ARRAY_SIZE(max77650_charger_vchgin_min_table); i++) {
+ if (val == max77650_charger_vchgin_min_table[i]) {
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_VCHGIN_MIN_MASK,
+ MAX77650_CHARGER_VCHGIN_MIN_SHIFT(i));
+ if (rv)
+ return rv;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max77650_charger_set_ichgin_lim(struct max77650_charger_data *chg,
+ unsigned int val)
+{
+ int i, rv;
+
+ for (i = 0; i < ARRAY_SIZE(max77650_charger_ichgin_lim_table); i++) {
+ if (val == max77650_charger_ichgin_lim_table[i]) {
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_ICHGIN_LIM_MASK,
+ MAX77650_CHARGER_ICHGIN_LIM_SHIFT(i));
+ if (rv)
+ return rv;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max77650_charger_enable(struct max77650_charger_data *chg)
+{
+ int rv;
+
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_CHG_EN_MASK,
+ MAX77650_CHARGER_ENABLED);
+ if (rv)
+ dev_err(chg->dev, "unable to enable the charger: %d\n", rv);
+
+ return rv;
+}
+
+static int max77650_charger_disable(struct max77650_charger_data *chg)
+{
+ int rv;
+
+ rv = regmap_update_bits(chg->map,
+ MAX77650_REG_CNFG_CHG_B,
+ MAX77650_CHARGER_CHG_EN_MASK,
+ MAX77650_CHARGER_DISABLED);
+ if (rv)
+ dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
+
+ return rv;
+}
+
+static irqreturn_t max77650_charger_check_status(int irq, void *data)
+{
+ struct max77650_charger_data *chg = data;
+ int rv, reg;
+
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv) {
+ dev_err(chg->dev,
+ "unable to read the charger status: %d\n", rv);
+ return IRQ_HANDLED;
+ }
+
+ switch (MAX77650_CHGIN_DETAILS_BITS(reg)) {
+ case MAX77650_CHGIN_UNDERVOLTAGE_LOCKOUT:
+ dev_err(chg->dev, "undervoltage lockout detected, disabling charger\n");
+ max77650_charger_disable(chg);
+ break;
+ case MAX77650_CHGIN_OVERVOLTAGE_LOCKOUT:
+ dev_err(chg->dev, "overvoltage lockout detected, disabling charger\n");
+ max77650_charger_disable(chg);
+ break;
+ case MAX77650_CHGIN_OKAY:
+ max77650_charger_enable(chg);
+ break;
+ default:
+ /* May be 0x10 - debouncing */
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int max77650_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max77650_charger_data *chg = power_supply_get_drvdata(psy);
+ int rv, reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv)
+ return rv;
+
+ if (MAX77650_CHARGER_CHG_CHARGING(reg)) {
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ }
+
+ switch (MAX77650_CHG_DETAILS_BITS(reg)) {
+ case MAX77650_CHG_OFF:
+ case MAX77650_CHG_SUSP_PREQ_TIM_FAULT:
+ case MAX77650_CHG_SUSP_FAST_CHG_TIM_FAULT:
+ case MAX77650_CHG_SUSP_BATT_TEMP_FAULT:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case MAX77650_CHG_PREQ:
+ case MAX77650_CHG_ON_CURR:
+ case MAX77650_CHG_ON_CURR_JEITA:
+ case MAX77650_CHG_ON_VOLT:
+ case MAX77650_CHG_ON_VOLT_JEITA:
+ case MAX77650_CHG_ON_TOPOFF:
+ case MAX77650_CHG_ON_TOPOFF_JEITA:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case MAX77650_CHG_DONE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv)
+ return rv;
+
+ val->intval = MAX77650_CHARGER_CHG_CHARGING(reg);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg);
+ if (rv)
+ return rv;
+
+ if (!MAX77650_CHARGER_CHG_CHARGING(reg)) {
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ }
+
+ switch (MAX77650_CHG_DETAILS_BITS(reg)) {
+ case MAX77650_CHG_PREQ:
+ case MAX77650_CHG_ON_CURR:
+ case MAX77650_CHG_ON_CURR_JEITA:
+ case MAX77650_CHG_ON_VOLT:
+ case MAX77650_CHG_ON_VOLT_JEITA:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case MAX77650_CHG_ON_TOPOFF:
+ case MAX77650_CHG_ON_TOPOFF_JEITA:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc max77650_battery_desc = {
+ .name = "max77650",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .get_property = max77650_charger_get_property,
+ .properties = max77650_charger_properties,
+ .num_properties = ARRAY_SIZE(max77650_charger_properties),
+};
+
+static int max77650_charger_probe(struct platform_device *pdev)
+{
+ struct power_supply_config pscfg = {};
+ struct max77650_charger_data *chg;
+ struct power_supply *battery;
+ struct device *dev, *parent;
+ int rv, chg_irq, chgin_irq;
+ unsigned int prop;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+
+ chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chg);
+
+ chg->map = dev_get_regmap(parent, NULL);
+ if (!chg->map)
+ return -ENODEV;
+
+ chg->dev = dev;
+
+ pscfg.of_node = dev->of_node;
+ pscfg.drv_data = chg;
+
+ chg_irq = platform_get_irq_byname(pdev, "CHG");
+ if (chg_irq < 0)
+ return chg_irq;
+
+ chgin_irq = platform_get_irq_byname(pdev, "CHGIN");
+ if (chgin_irq < 0)
+ return chgin_irq;
+
+ rv = devm_request_any_context_irq(dev, chg_irq,
+ max77650_charger_check_status,
+ IRQF_ONESHOT, "chg", chg);
+ if (rv < 0)
+ return rv;
+
+ rv = devm_request_any_context_irq(dev, chgin_irq,
+ max77650_charger_check_status,
+ IRQF_ONESHOT, "chgin", chg);
+ if (rv < 0)
+ return rv;
+
+ battery = devm_power_supply_register(dev,
+ &max77650_battery_desc, &pscfg);
+ if (IS_ERR(battery))
+ return PTR_ERR(battery);
+
+ rv = of_property_read_u32(dev->of_node,
+ "input-voltage-min-microvolt", &prop);
+ if (rv == 0) {
+ rv = max77650_charger_set_vchgin_min(chg, prop);
+ if (rv)
+ return rv;
+ }
+
+ rv = of_property_read_u32(dev->of_node,
+ "input-current-limit-microamp", &prop);
+ if (rv == 0) {
+ rv = max77650_charger_set_ichgin_lim(chg, prop);
+ if (rv)
+ return rv;
+ }
+
+ return max77650_charger_enable(chg);
+}
+
+static int max77650_charger_remove(struct platform_device *pdev)
+{
+ struct max77650_charger_data *chg = platform_get_drvdata(pdev);
+
+ return max77650_charger_disable(chg);
+}
+
+static struct platform_driver max77650_charger_driver = {
+ .driver = {
+ .name = "max77650-charger",
+ },
+ .probe = max77650_charger_probe,
+ .remove = max77650_charger_remove,
+};
+module_platform_driver(max77650_charger_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 charger driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 5a97e42a3547..7720e4c2ac0b 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/jiffies.h>
@@ -52,6 +53,14 @@
#define BAT_ADDR_MFR_TYPE 0x5F
+struct olpc_battery_data {
+ struct power_supply *olpc_ac;
+ struct power_supply *olpc_bat;
+ char bat_serial[17];
+ bool new_proto;
+ bool little_endian;
+};
+
/*********************************************************************
* Power
*********************************************************************/
@@ -90,13 +99,10 @@ static const struct power_supply_desc olpc_ac_desc = {
.get_property = olpc_ac_get_prop,
};
-static struct power_supply *olpc_ac;
-
-static char bat_serial[17]; /* Ick */
-
-static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte)
+static int olpc_bat_get_status(struct olpc_battery_data *data,
+ union power_supply_propval *val, uint8_t ec_byte)
{
- if (olpc_platform_info.ecver > 0x44) {
+ if (data->new_proto) {
if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (ec_byte & BAT_STAT_DISCHARGING)
@@ -318,6 +324,14 @@ static int olpc_bat_get_voltage_max_design(union power_supply_propval *val)
return ret;
}
+static u16 ecword_to_cpu(struct olpc_battery_data *data, u16 ec_word)
+{
+ if (data->little_endian)
+ return le16_to_cpu((__force __le16)ec_word);
+ else
+ return be16_to_cpu((__force __be16)ec_word);
+}
+
/*********************************************************************
* Battery properties
*********************************************************************/
@@ -325,8 +339,9 @@ static int olpc_bat_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct olpc_battery_data *data = power_supply_get_drvdata(psy);
int ret = 0;
- __be16 ec_word;
+ u16 ec_word;
uint8_t ec_byte;
__be64 ser_buf;
@@ -346,7 +361,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- ret = olpc_bat_get_status(val, ec_byte);
+ ret = olpc_bat_get_status(data, val, ec_byte);
if (ret)
return ret;
break;
@@ -389,7 +404,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = ecword_to_cpu(data, ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_NOW:
@@ -397,7 +412,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = ecword_to_cpu(data, ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -428,29 +443,29 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
+ val->intval = ecword_to_cpu(data, ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
+ val->intval = (int)ecword_to_cpu(data, ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = ecword_to_cpu(data, ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
if (ret)
return ret;
- sprintf(bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
- val->strval = bat_serial;
+ sprintf(data->bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
+ val->strval = data->bat_serial;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
ret = olpc_bat_get_voltage_max_design(val);
@@ -536,7 +551,7 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
return count;
}
-static const struct bin_attribute olpc_bat_eeprom = {
+static struct bin_attribute olpc_bat_eeprom = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
@@ -560,7 +575,7 @@ static ssize_t olpc_bat_error_read(struct device *dev,
return sprintf(buf, "%d\n", ec_byte);
}
-static const struct device_attribute olpc_bat_error = {
+static struct device_attribute olpc_bat_error = {
.attr = {
.name = "error",
.mode = S_IRUGO,
@@ -568,6 +583,27 @@ static const struct device_attribute olpc_bat_error = {
.show = olpc_bat_error_read,
};
+static struct attribute *olpc_bat_sysfs_attrs[] = {
+ &olpc_bat_error.attr,
+ NULL
+};
+
+static struct bin_attribute *olpc_bat_sysfs_bin_attrs[] = {
+ &olpc_bat_eeprom,
+ NULL
+};
+
+static const struct attribute_group olpc_bat_sysfs_group = {
+ .attrs = olpc_bat_sysfs_attrs,
+ .bin_attrs = olpc_bat_sysfs_bin_attrs,
+
+};
+
+static const struct attribute_group *olpc_bat_sysfs_groups[] = {
+ &olpc_bat_sysfs_group,
+ NULL
+};
+
/*********************************************************************
* Initialisation
*********************************************************************/
@@ -578,17 +614,17 @@ static struct power_supply_desc olpc_bat_desc = {
.use_for_apm = 1,
};
-static struct power_supply *olpc_bat;
-
static int olpc_battery_suspend(struct platform_device *pdev,
pm_message_t state)
{
- if (device_may_wakeup(&olpc_ac->dev))
+ struct olpc_battery_data *data = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&data->olpc_ac->dev))
olpc_ec_wakeup_set(EC_SCI_SRC_ACPWR);
else
olpc_ec_wakeup_clear(EC_SCI_SRC_ACPWR);
- if (device_may_wakeup(&olpc_bat->dev))
+ if (device_may_wakeup(&data->olpc_bat->dev))
olpc_ec_wakeup_set(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
| EC_SCI_SRC_BATERR);
else
@@ -600,16 +636,37 @@ static int olpc_battery_suspend(struct platform_device *pdev,
static int olpc_battery_probe(struct platform_device *pdev)
{
- int ret;
+ struct power_supply_config bat_psy_cfg = {};
+ struct power_supply_config ac_psy_cfg = {};
+ struct olpc_battery_data *data;
uint8_t status;
+ uint8_t ecver;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, data);
- /*
- * We've seen a number of EC protocol changes; this driver requires
- * the latest EC protocol, supported by 0x44 and above.
- */
- if (olpc_platform_info.ecver < 0x44) {
+ /* See if the EC is already there and get the EC revision */
+ ret = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ecver, 1);
+ if (ret)
+ return ret;
+
+ if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ /* XO 1.75 */
+ data->new_proto = true;
+ data->little_endian = true;
+ } else if (ecver > 0x44) {
+ /* XO 1 or 1.5 with a new EC firmware. */
+ data->new_proto = true;
+ } else if (ecver < 0x44) {
+ /*
+ * We've seen a number of EC protocol changes; this driver
+ * requires the latest EC protocol, supported by 0x44 and above.
+ */
printk(KERN_NOTICE "OLPC EC version 0x%02x too old for "
- "battery driver.\n", olpc_platform_info.ecver);
+ "battery driver.\n", ecver);
return -ENXIO;
}
@@ -619,59 +676,44 @@ static int olpc_battery_probe(struct platform_device *pdev)
/* Ignore the status. It doesn't actually matter */
- olpc_ac = power_supply_register(&pdev->dev, &olpc_ac_desc, NULL);
- if (IS_ERR(olpc_ac))
- return PTR_ERR(olpc_ac);
+ ac_psy_cfg.of_node = pdev->dev.of_node;
+ ac_psy_cfg.drv_data = data;
+
+ data->olpc_ac = devm_power_supply_register(&pdev->dev, &olpc_ac_desc,
+ &ac_psy_cfg);
+ if (IS_ERR(data->olpc_ac))
+ return PTR_ERR(data->olpc_ac);
- if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
+ if (of_device_is_compatible(pdev->dev.of_node, "olpc,xo1.5-battery")) {
+ /* XO-1.5 */
olpc_bat_desc.properties = olpc_xo15_bat_props;
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
- } else { /* XO-1 */
+ } else {
+ /* XO-1 */
olpc_bat_desc.properties = olpc_xo1_bat_props;
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
}
- olpc_bat = power_supply_register(&pdev->dev, &olpc_bat_desc, NULL);
- if (IS_ERR(olpc_bat)) {
- ret = PTR_ERR(olpc_bat);
- goto battery_failed;
- }
-
- ret = device_create_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
- if (ret)
- goto eeprom_failed;
+ bat_psy_cfg.of_node = pdev->dev.of_node;
+ bat_psy_cfg.drv_data = data;
+ bat_psy_cfg.attr_grp = olpc_bat_sysfs_groups;
- ret = device_create_file(&olpc_bat->dev, &olpc_bat_error);
- if (ret)
- goto error_failed;
+ data->olpc_bat = devm_power_supply_register(&pdev->dev, &olpc_bat_desc,
+ &bat_psy_cfg);
+ if (IS_ERR(data->olpc_bat))
+ return PTR_ERR(data->olpc_bat);
if (olpc_ec_wakeup_available()) {
- device_set_wakeup_capable(&olpc_ac->dev, true);
- device_set_wakeup_capable(&olpc_bat->dev, true);
+ device_set_wakeup_capable(&data->olpc_ac->dev, true);
+ device_set_wakeup_capable(&data->olpc_bat->dev, true);
}
return 0;
-
-error_failed:
- device_remove_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
-eeprom_failed:
- power_supply_unregister(olpc_bat);
-battery_failed:
- power_supply_unregister(olpc_ac);
- return ret;
-}
-
-static int olpc_battery_remove(struct platform_device *pdev)
-{
- device_remove_file(&olpc_bat->dev, &olpc_bat_error);
- device_remove_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
- power_supply_unregister(olpc_bat);
- power_supply_unregister(olpc_ac);
- return 0;
}
static const struct of_device_id olpc_battery_ids[] = {
{ .compatible = "olpc,xo1-battery" },
+ { .compatible = "olpc,xo1.5-battery" },
{}
};
MODULE_DEVICE_TABLE(of, olpc_battery_ids);
@@ -682,7 +724,6 @@ static struct platform_driver olpc_battery_driver = {
.of_match_table = olpc_battery_ids,
},
.probe = olpc_battery_probe,
- .remove = olpc_battery_remove,
.suspend = olpc_battery_suspend,
};
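
The olpc_battery changes stop assuming big-endian EC words and route every conversion through ecword_to_cpu(), which selects the byte order per platform (the XO-1.75 EC reports little-endian words). A plain C stand-in for that selection, independent of the kernel helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Interpret a 2-byte EC word either little- or big-endian, mirroring
 * the per-platform switch in ecword_to_cpu() above. */
static uint16_t ec_word_to_cpu(bool little_endian, const uint8_t raw[2])
{
	if (little_endian)
		return (uint16_t)(raw[0] | (raw[1] << 8));
	return (uint16_t)((raw[0] << 8) | raw[1]);
}

int main(void)
{
	const uint8_t raw[2] = { 0x12, 0x34 };	/* hypothetical EC bytes */

	printf("BE: 0x%04x, LE: 0x%04x\n",
	       ec_word_to_cpu(false, raw), ec_word_to_cpu(true, raw));
	return 0;
}
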
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index c917a8b43b2b..f7033ecf6d0b 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -598,10 +598,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = of_property_read_string(battery_np, "compatible", &value);
if (err)
- return err;
+ goto out_put_node;
- if (strcmp("simple-battery", value))
- return -ENODEV;
+ if (strcmp("simple-battery", value)) {
+ err = -ENODEV;
+ goto out_put_node;
+ }
/* The property and field names below must correspond to elements
* in enum power_supply_property. For reasoning, see
@@ -620,19 +622,21 @@ int power_supply_get_battery_info(struct power_supply *psy,
&info->precharge_current_ua);
of_property_read_u32(battery_np, "charge-term-current-microamp",
&info->charge_term_current_ua);
- of_property_read_u32(battery_np, "constant_charge_current_max_microamp",
+ of_property_read_u32(battery_np, "constant-charge-current-max-microamp",
&info->constant_charge_current_max_ua);
- of_property_read_u32(battery_np, "constant_charge_voltage_max_microvolt",
+ of_property_read_u32(battery_np, "constant-charge-voltage-max-microvolt",
&info->constant_charge_voltage_max_uv);
of_property_read_u32(battery_np, "factory-internal-resistance-micro-ohms",
&info->factory_internal_resistance_uohm);
len = of_property_count_u32_elems(battery_np, "ocv-capacity-celsius");
if (len < 0 && len != -EINVAL) {
- return len;
+ err = len;
+ goto out_put_node;
} else if (len > POWER_SUPPLY_OCV_TEMP_MAX) {
dev_err(&psy->dev, "Too many temperature values\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
} else if (len > 0) {
of_property_read_u32_array(battery_np, "ocv-capacity-celsius",
info->ocv_temp, len);
@@ -650,7 +654,8 @@ int power_supply_get_battery_info(struct power_supply *psy,
dev_err(&psy->dev, "failed to get %s\n", propname);
kfree(propname);
power_supply_put_battery_info(psy, info);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
}
kfree(propname);
@@ -661,16 +666,21 @@ int power_supply_get_battery_info(struct power_supply *psy,
devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL);
if (!info->ocv_table[index]) {
power_supply_put_battery_info(psy, info);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_put_node;
}
for (i = 0; i < tab_len; i++) {
- table[i].ocv = be32_to_cpu(*list++);
- table[i].capacity = be32_to_cpu(*list++);
+ table[i].ocv = be32_to_cpu(*list);
+ list++;
+ table[i].capacity = be32_to_cpu(*list);
+ list++;
}
}
- return 0;
+out_put_node:
+ of_node_put(battery_np);
+ return err;
}
EXPORT_SYMBOL_GPL(power_supply_get_battery_info);
@@ -899,7 +909,7 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
return ret;
}
-static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
+static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
unsigned long *state)
{
struct power_supply *psy;
@@ -934,7 +944,7 @@ static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
static const struct thermal_cooling_device_ops psy_tcd_ops = {
.get_max_state = ps_get_max_charge_cntl_limit,
- .get_cur_state = ps_get_cur_chrage_cntl_limit,
+ .get_cur_state = ps_get_cur_charge_cntl_limit,
.set_cur_state = ps_set_cur_charge_cntl_limit,
};
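
The power_supply_core fix routes every exit of power_supply_get_battery_info() through out_put_node so the reference taken on the battery node is always dropped. A condensed sketch of that single-exit pattern, assuming the same "monitored-battery" phandle and "simple-battery" compatible used above; the function name is hypothetical.

#include <linux/of.h>

static int parse_battery_node(struct device_node *parent)
{
	struct device_node *battery_np;
	int err = 0;

	/* The reference taken here must be dropped on every exit path. */
	battery_np = of_parse_phandle(parent, "monitored-battery", 0);
	if (!battery_np)
		return -ENODEV;

	if (!of_device_is_compatible(battery_np, "simple-battery")) {
		err = -ENODEV;
		goto out_put_node;
	}

	/* ... further parsing; each failure sets err and jumps below ... */

out_put_node:
	of_node_put(battery_np);
	return err;
}
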
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 5358a80d854f..a704a76d7529 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -56,13 +56,13 @@ static const char * const power_supply_status_text[] = {
};
static const char * const power_supply_charge_type_text[] = {
- "Unknown", "N/A", "Trickle", "Fast"
+ "Unknown", "N/A", "Trickle", "Fast", "Standard", "Adaptive", "Custom"
};
static const char * const power_supply_health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold", "Watchdog timer expire",
- "Safety timer expire"
+ "Safety timer expire", "Over current"
};
static const char * const power_supply_technology_text[] = {
@@ -274,6 +274,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
POWER_SUPPLY_ATTR(charge_control_limit),
POWER_SUPPLY_ATTR(charge_control_limit_max),
+ POWER_SUPPLY_ATTR(charge_control_start_threshold),
+ POWER_SUPPLY_ATTR(charge_control_end_threshold),
POWER_SUPPLY_ATTR(input_current_limit),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
new file mode 100644
index 000000000000..1c89d030c045
--- /dev/null
+++ b/drivers/power/supply/ucs1002_power.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for UCS1002 Programmable USB Port Power Controller
+ *
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+#include <linux/bits.h>
+#include <linux/freezer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/* UCS1002 Registers */
+#define UCS1002_REG_CURRENT_MEASUREMENT 0x00
+
+/*
+ * The Total Accumulated Charge registers store the total accumulated
+ * charge delivered from the VS source to a portable device. The total
+ * value is calculated using four registers, from 01h to 04h. The bit
+ * weighting of the registers is given in mA/hrs.
+ */
+#define UCS1002_REG_TOTAL_ACC_CHARGE 0x01
+
+/* Other Status Register */
+#define UCS1002_REG_OTHER_STATUS 0x0f
+# define F_ADET_PIN BIT(4)
+# define F_CHG_ACT BIT(3)
+
+/* Interrupt Status */
+#define UCS1002_REG_INTERRUPT_STATUS 0x10
+# define F_DISCHARGE_ERR BIT(6)
+# define F_RESET BIT(5)
+# define F_MIN_KEEP_OUT BIT(4)
+# define F_TSD BIT(3)
+# define F_OVER_VOLT BIT(2)
+# define F_BACK_VOLT BIT(1)
+# define F_OVER_ILIM BIT(0)
+
+/* Pin Status Register */
+#define UCS1002_REG_PIN_STATUS 0x14
+# define UCS1002_PWR_STATE_MASK 0x03
+# define F_PWR_EN_PIN BIT(6)
+# define F_M2_PIN BIT(5)
+# define F_M1_PIN BIT(4)
+# define F_EM_EN_PIN BIT(3)
+# define F_SEL_PIN BIT(2)
+# define F_ACTIVE_MODE_MASK GENMASK(5, 3)
+# define F_ACTIVE_MODE_PASSTHROUGH F_M2_PIN
+# define F_ACTIVE_MODE_DEDICATED F_EM_EN_PIN
+# define F_ACTIVE_MODE_BC12_DCP (F_M2_PIN | F_EM_EN_PIN)
+# define F_ACTIVE_MODE_BC12_SDP F_M1_PIN
+# define F_ACTIVE_MODE_BC12_CDP (F_M1_PIN | F_M2_PIN | F_EM_EN_PIN)
+
+/* General Configuration Register */
+#define UCS1002_REG_GENERAL_CFG 0x15
+# define F_RATION_EN BIT(3)
+
+/* Emulation Configuration Register */
+#define UCS1002_REG_EMU_CFG 0x16
+
+/* Switch Configuration Register */
+#define UCS1002_REG_SWITCH_CFG 0x17
+# define F_PIN_IGNORE BIT(7)
+# define F_EM_EN_SET BIT(5)
+# define F_M2_SET BIT(4)
+# define F_M1_SET BIT(3)
+# define F_S0_SET BIT(2)
+# define F_PWR_EN_SET BIT(1)
+# define F_LATCH_SET BIT(0)
+# define V_SET_ACTIVE_MODE_MASK GENMASK(5, 3)
+# define V_SET_ACTIVE_MODE_PASSTHROUGH F_M2_SET
+# define V_SET_ACTIVE_MODE_DEDICATED F_EM_EN_SET
+# define V_SET_ACTIVE_MODE_BC12_DCP (F_M2_SET | F_EM_EN_SET)
+# define V_SET_ACTIVE_MODE_BC12_SDP F_M1_SET
+# define V_SET_ACTIVE_MODE_BC12_CDP (F_M1_SET | F_M2_SET | F_EM_EN_SET)
+
+/* Current Limit Register */
+#define UCS1002_REG_ILIMIT 0x19
+# define UCS1002_ILIM_SW_MASK GENMASK(3, 0)
+
+/* Product ID */
+#define UCS1002_REG_PRODUCT_ID 0xfd
+# define UCS1002_PRODUCT_ID 0x4e
+
+/* Manufacture name */
+#define UCS1002_MANUFACTURER "SMSC"
+
+struct ucs1002_info {
+ struct power_supply *charger;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regulator_desc *regulator_descriptor;
+ bool present;
+};
+
+static enum power_supply_property ucs1002_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of PED */
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int ucs1002_get_online(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_OTHER_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & F_CHG_ACT);
+
+ return 0;
+}
+
+static int ucs1002_get_charge(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ /*
+ * To fit within 32 bits some values are rounded (uA/h)
+ *
+ * For Total Accumulated Charge Middle Low Byte register, addr
+ * 03h, byte 2
+ *
+ * B0: 0.01084 mA/h rounded to 11 uA/h
+ * B1: 0.02169 mA/h rounded to 22 uA/h
+ * B2: 0.04340 mA/h rounded to 43 uA/h
+ * B3: 0.08676 mA/h rounded to 87 uA/h
+ * B4: 0.17350 mA/h rounded to 173 uA/h
+ *
+ * For Total Accumulated Charge Low Byte register, addr 04h,
+ * byte 3
+ *
+ * B6: 0.00271 mA/h rounded to 3 uA/h
+ * B7: 0.005422 mA/h rounded to 5 uA/h
+ */
+ static const int bit_weights_uAh[BITS_PER_TYPE(u32)] = {
+ /*
+ * Bit corresponding to low byte (offset 0x04)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 0, 0, 0, 0, 0, 0, 3, 5,
+ /*
+ * Bit corresponding to middle low byte (offset 0x03)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 11, 22, 43, 87, 173, 347, 694, 1388,
+ /*
+ * Bit corresponding to middle high byte (offset 0x02)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 2776, 5552, 11105, 22210, 44420, 88840, 177700, 355400,
+ /*
+ * Bit corresponding to high byte (offset 0x01)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 710700, 1421000, 2843000, 5685000, 11371000, 22742000,
+ 45484000, 90968000,
+ };
+ unsigned long total_acc_charger;
+ unsigned int reg;
+ int i, ret;
+
+ ret = regmap_bulk_read(info->regmap, UCS1002_REG_TOTAL_ACC_CHARGE,
+ &reg, sizeof(u32));
+ if (ret)
+ return ret;
+
+ total_acc_charger = be32_to_cpu(reg); /* BE as per offsets above */
+ val->intval = 0;
+
+ for_each_set_bit(i, &total_acc_charger, ARRAY_SIZE(bit_weights_uAh))
+ val->intval += bit_weights_uAh[i];
+
+ return 0;
+}
+
+static int ucs1002_get_current(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ /*
+ * The Current Measurement register stores the measured
+ * current value delivered to the portable device. The range
+ * is from 9.76 mA to 2.5 A.
+ */
+ static const int bit_weights_uA[BITS_PER_TYPE(u8)] = {
+ 9760, 19500, 39000, 78100, 156200, 312300, 624600, 1249300,
+ };
+ unsigned long current_measurement;
+ unsigned int reg;
+ int i, ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_CURRENT_MEASUREMENT, &reg);
+ if (ret)
+ return ret;
+
+ current_measurement = reg;
+ val->intval = 0;
+
+ for_each_set_bit(i, &current_measurement, ARRAY_SIZE(bit_weights_uA))
+ val->intval += bit_weights_uA[i];
+
+ return 0;
+}
+
+/*
+ * The Current Limit register stores the maximum current used by the
+ * port switch. The range is from 500mA to 2.5 A.
+ */
+static const u32 ucs1002_current_limit_uA[] = {
+ 500000, 900000, 1000000, 1200000, 1500000, 1800000, 2000000, 2500000,
+};
+
+static int ucs1002_get_max_current(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = ucs1002_current_limit_uA[reg & UCS1002_ILIM_SW_MASK];
+
+ return 0;
+}
+
+static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
+{
+ unsigned int reg;
+ int ret, idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ucs1002_current_limit_uA); idx++) {
+ if (val == ucs1002_current_limit_uA[idx])
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(ucs1002_current_limit_uA))
+ return -EINVAL;
+
+ ret = regmap_write(info->regmap, UCS1002_REG_ILIMIT, idx);
+ if (ret)
+ return ret;
+ /*
+ * Any current limit setting exceeding the one set via ILIM
+ * pin will be rejected, so we read out freshly changed limit
+ * to make sure that it took effect.
+ */
+ ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
+ if (ret)
+ return ret;
+
+ if (reg != idx)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum power_supply_usb_type ucs1002_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+};
+
+static int ucs1002_set_usb_type(struct ucs1002_info *info, int val)
+{
+ unsigned int mode;
+
+ if (val < 0 || val >= ARRAY_SIZE(ucs1002_usb_types))
+ return -EINVAL;
+
+ switch (ucs1002_usb_types[val]) {
+ case POWER_SUPPLY_USB_TYPE_PD:
+ mode = V_SET_ACTIVE_MODE_DEDICATED;
+ break;
+ case POWER_SUPPLY_USB_TYPE_SDP:
+ mode = V_SET_ACTIVE_MODE_BC12_SDP;
+ break;
+ case POWER_SUPPLY_USB_TYPE_DCP:
+ mode = V_SET_ACTIVE_MODE_BC12_DCP;
+ break;
+ case POWER_SUPPLY_USB_TYPE_CDP:
+ mode = V_SET_ACTIVE_MODE_BC12_CDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(info->regmap, UCS1002_REG_SWITCH_CFG,
+ V_SET_ACTIVE_MODE_MASK, mode);
+}
+
+static int ucs1002_get_usb_type(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ enum power_supply_usb_type type;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PIN_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ switch (reg & F_ACTIVE_MODE_MASK) {
+ default:
+ type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ case F_ACTIVE_MODE_DEDICATED:
+ type = POWER_SUPPLY_USB_TYPE_PD;
+ break;
+ case F_ACTIVE_MODE_BC12_SDP:
+ type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case F_ACTIVE_MODE_BC12_DCP:
+ type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case F_ACTIVE_MODE_BC12_CDP:
+ type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ };
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int ucs1002_get_health(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret, health;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_INTERRUPT_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ if (reg & F_TSD)
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (reg & (F_OVER_VOLT | F_BACK_VOLT))
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (reg & F_OVER_ILIM)
+ health = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else if (reg & (F_DISCHARGE_ERR | F_MIN_KEEP_OUT))
+ health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else
+ health = POWER_SUPPLY_HEALTH_GOOD;
+
+ val->intval = health;
+
+ return 0;
+}
+
+static int ucs1002_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ucs1002_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return ucs1002_get_online(info, val);
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return ucs1002_get_charge(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ucs1002_get_current(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucs1002_get_max_current(info, val);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucs1002_get_usb_type(info, val);
+ case POWER_SUPPLY_PROP_HEALTH:
+ return ucs1002_get_health(info, val);
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = info->present;
+ return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = UCS1002_MANUFACTURER;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ucs1002_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct ucs1002_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucs1002_set_max_current(info, val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucs1002_set_usb_type(info, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ucs1002_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct power_supply_desc ucs1002_charger_desc = {
+ .name = "ucs1002",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = ucs1002_usb_types,
+ .num_usb_types = ARRAY_SIZE(ucs1002_usb_types),
+ .get_property = ucs1002_get_property,
+ .set_property = ucs1002_set_property,
+ .property_is_writeable = ucs1002_property_is_writeable,
+ .properties = ucs1002_props,
+ .num_properties = ARRAY_SIZE(ucs1002_props),
+};
+
+static irqreturn_t ucs1002_charger_irq(int irq, void *data)
+{
+ int ret, regval;
+ bool present;
+ struct ucs1002_info *info = data;
+
+ present = info->present;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_OTHER_STATUS, &regval);
+ if (ret)
+ return IRQ_HANDLED;
+
+ /* update attached status */
+ info->present = regval & F_ADET_PIN;
+
+ /* notify the change */
+ if (present != info->present)
+ power_supply_changed(info->charger);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ucs1002_alert_irq(int irq, void *data)
+{
+ struct ucs1002_info *info = data;
+
+ power_supply_changed(info->charger);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regulator_ops ucs1002_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_desc ucs1002_regulator_descriptor = {
+ .name = "ucs1002-vbus",
+ .ops = &ucs1002_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = UCS1002_REG_SWITCH_CFG,
+ .enable_mask = F_PWR_EN_SET,
+ .enable_val = F_PWR_EN_SET,
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+};
+
+static int ucs1002_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct power_supply_config charger_config = {};
+ const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ struct regulator_config regulator_config = {};
+ int irq_a_det, irq_alert, ret;
+ struct regulator_dev *rdev;
+ struct ucs1002_info *info;
+ unsigned int regval;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ ret = PTR_ERR_OR_ZERO(info->regmap);
+ if (ret) {
+ dev_err(dev, "Regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ info->client = client;
+
+ irq_a_det = of_irq_get_byname(dev->of_node, "a_det");
+ irq_alert = of_irq_get_byname(dev->of_node, "alert");
+
+ charger_config.of_node = dev->of_node;
+ charger_config.drv_data = info;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PRODUCT_ID, &regval);
+ if (ret) {
+ dev_err(dev, "Failed to read product ID: %d\n", ret);
+ return ret;
+ }
+
+ if (regval != UCS1002_PRODUCT_ID) {
+ dev_err(dev,
+ "Product ID does not match (0x%02x != 0x%02x)\n",
+ regval, UCS1002_PRODUCT_ID);
+ return -ENODEV;
+ }
+
+ /* Enable charge rationing by default */
+ ret = regmap_update_bits(info->regmap, UCS1002_REG_GENERAL_CFG,
+ F_RATION_EN, F_RATION_EN);
+ if (ret) {
+ dev_err(dev, "Failed to read general config: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Ignore the M1, M2, PWR_EN, and EM_EN pin states. Set active
+ * mode selection to BC1.2 CDP.
+ */
+ ret = regmap_update_bits(info->regmap, UCS1002_REG_SWITCH_CFG,
+ V_SET_ACTIVE_MODE_MASK | F_PIN_IGNORE,
+ V_SET_ACTIVE_MODE_BC12_CDP | F_PIN_IGNORE);
+ if (ret) {
+ dev_err(dev, "Failed to configure default mode: %d\n", ret);
+ return ret;
+ }
+ /*
+ * Be safe and set initial current limit to 500mA
+ */
+ ret = ucs1002_set_max_current(info, 500000);
+ if (ret) {
+ dev_err(dev, "Failed to set max current default: %d\n", ret);
+ return ret;
+ }
+
+ info->charger = devm_power_supply_register(dev, &ucs1002_charger_desc,
+ &charger_config);
+ ret = PTR_ERR_OR_ZERO(info->charger);
+ if (ret) {
+ dev_err(dev, "Failed to register power supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PIN_STATUS, &regval);
+ if (ret) {
+ dev_err(dev, "Failed to read pin status: %d\n", ret);
+ return ret;
+ }
+
+ info->regulator_descriptor =
+ devm_kmemdup(dev, &ucs1002_regulator_descriptor,
+ sizeof(ucs1002_regulator_descriptor),
+ GFP_KERNEL);
+ if (!info->regulator_descriptor)
+ return -ENOMEM;
+
+ info->regulator_descriptor->enable_is_inverted = !(regval & F_SEL_PIN);
+
+ regulator_config.dev = dev;
+ regulator_config.of_node = dev->of_node;
+ regulator_config.regmap = info->regmap;
+
+ rdev = devm_regulator_register(dev, info->regulator_descriptor,
+ &regulator_config);
+ ret = PTR_ERR_OR_ZERO(rdev);
+ if (ret) {
+ dev_err(dev, "Failed to register VBUS regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (irq_a_det > 0) {
+ ret = devm_request_threaded_irq(dev, irq_a_det, NULL,
+ ucs1002_charger_irq,
+ IRQF_ONESHOT,
+ "ucs1002-a_det", info);
+ if (ret) {
+ dev_err(dev, "Failed to request A_DET threaded irq: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (irq_alert > 0) {
+ ret = devm_request_threaded_irq(dev, irq_alert, NULL,
+ ucs1002_alert_irq,
+ IRQF_ONESHOT,
+ "ucs1002-alert", info);
+ if (ret) {
+ dev_err(dev, "Failed to request ALERT threaded irq: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ucs1002_of_match[] = {
+ { .compatible = "microchip,ucs1002", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ucs1002_of_match);
+
+static struct i2c_driver ucs1002_driver = {
+ .driver = {
+ .name = "ucs1002",
+ .of_match_table = ucs1002_of_match,
+ },
+ .probe = ucs1002_probe,
+};
+module_i2c_driver(ucs1002_driver);
+
+MODULE_DESCRIPTION("Microchip UCS1002 Programmable USB Port Power Controller");
+MODULE_AUTHOR("Enric Balletbo Serra <enric.balletbo@collabora.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index dd5d1103e02b..4b6418039387 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -31,19 +31,25 @@
#include <linux/slab.h>
#include <linux/pps_kernel.h>
#include <linux/pps-gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
/* Info for each registered platform device */
struct pps_gpio_device_data {
int irq; /* IRQ used as PPS source */
struct pps_device *pps; /* PPS source device */
struct pps_source_info info; /* PPS source information */
+ struct gpio_desc *gpio_pin; /* GPIO port descriptors */
+ struct gpio_desc *echo_pin;
+ struct timer_list echo_timer; /* timer to reset echo active state */
bool assert_falling_edge;
bool capture_clear;
- unsigned int gpio_pin;
+ unsigned int echo_active_ms; /* PPS echo active duration */
+ unsigned long echo_timeout; /* timer timeout value in jiffies */
};
/*
@@ -61,18 +67,101 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
info = data;
- rising_edge = gpio_get_value(info->gpio_pin);
+ rising_edge = gpiod_get_value(info->gpio_pin);
if ((rising_edge && !info->assert_falling_edge) ||
(!rising_edge && info->assert_falling_edge))
- pps_event(info->pps, &ts, PPS_CAPTUREASSERT, NULL);
+ pps_event(info->pps, &ts, PPS_CAPTUREASSERT, data);
else if (info->capture_clear &&
((rising_edge && info->assert_falling_edge) ||
- (!rising_edge && !info->assert_falling_edge)))
- pps_event(info->pps, &ts, PPS_CAPTURECLEAR, NULL);
+ (!rising_edge && !info->assert_falling_edge)))
+ pps_event(info->pps, &ts, PPS_CAPTURECLEAR, data);
return IRQ_HANDLED;
}
+/* This function will only be called when an ECHO GPIO is defined */
+static void pps_gpio_echo(struct pps_device *pps, int event, void *data)
+{
+ /* add_timer() needs to write into info->echo_timer */
+ struct pps_gpio_device_data *info = data;
+
+ switch (event) {
+ case PPS_CAPTUREASSERT:
+ if (pps->params.mode & PPS_ECHOASSERT)
+ gpiod_set_value(info->echo_pin, 1);
+ break;
+
+ case PPS_CAPTURECLEAR:
+ if (pps->params.mode & PPS_ECHOCLEAR)
+ gpiod_set_value(info->echo_pin, 1);
+ break;
+ }
+
+ /* fire the timer */
+ if (info->pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) {
+ info->echo_timer.expires = jiffies + info->echo_timeout;
+ add_timer(&info->echo_timer);
+ }
+}
+
+/* Timer callback to reset the echo pin to the inactive state */
+static void pps_gpio_echo_timer_callback(struct timer_list *t)
+{
+ const struct pps_gpio_device_data *info;
+
+ info = from_timer(info, t, echo_timer);
+
+ gpiod_set_value(info->echo_pin, 0);
+}
+
+static int pps_gpio_setup(struct platform_device *pdev)
+{
+ struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ u32 value;
+
+ data->gpio_pin = devm_gpiod_get(&pdev->dev,
+ NULL, /* request "gpios" */
+ GPIOD_IN);
+ if (IS_ERR(data->gpio_pin)) {
+ dev_err(&pdev->dev,
+ "failed to request PPS GPIO\n");
+ return PTR_ERR(data->gpio_pin);
+ }
+
+ data->echo_pin = devm_gpiod_get_optional(&pdev->dev,
+ "echo",
+ GPIOD_OUT_LOW);
+ if (data->echo_pin) {
+ if (IS_ERR(data->echo_pin)) {
+ dev_err(&pdev->dev, "failed to request ECHO GPIO\n");
+ return PTR_ERR(data->echo_pin);
+ }
+
+ ret = of_property_read_u32(np,
+ "echo-active-ms",
+ &value);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get echo-active-ms from OF\n");
+ return ret;
+ }
+ data->echo_active_ms = value;
+ /* sanity check on echo_active_ms */
+ if (!data->echo_active_ms || data->echo_active_ms > 999) {
+ dev_err(&pdev->dev,
+ "echo-active-ms: %u - bad value from OF\n",
+ data->echo_active_ms);
+ return -EINVAL;
+ }
+ }
+
+ if (of_property_read_bool(np, "assert-falling-edge"))
+ data->assert_falling_edge = true;
+ return 0;
+}
+
static unsigned long
get_irqf_trigger_flags(const struct pps_gpio_device_data *data)
{
@@ -90,53 +179,32 @@ get_irqf_trigger_flags(const struct pps_gpio_device_data *data)
static int pps_gpio_probe(struct platform_device *pdev)
{
struct pps_gpio_device_data *data;
- const char *gpio_label;
int ret;
int pps_default_params;
const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct device_node *np = pdev->dev.of_node;
/* allocate space for device info */
- data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data),
- GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ platform_set_drvdata(pdev, data);
+ /* GPIO setup */
if (pdata) {
data->gpio_pin = pdata->gpio_pin;
- gpio_label = pdata->gpio_label;
+ data->echo_pin = pdata->echo_pin;
data->assert_falling_edge = pdata->assert_falling_edge;
data->capture_clear = pdata->capture_clear;
+ data->echo_active_ms = pdata->echo_active_ms;
} else {
- ret = of_get_gpio(np, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get GPIO from device tree\n");
- return ret;
- }
- data->gpio_pin = ret;
- gpio_label = PPS_GPIO_NAME;
-
- if (of_get_property(np, "assert-falling-edge", NULL))
- data->assert_falling_edge = true;
- }
-
- /* GPIO setup */
- ret = devm_gpio_request(&pdev->dev, data->gpio_pin, gpio_label);
- if (ret) {
- dev_err(&pdev->dev, "failed to request GPIO %u\n",
- data->gpio_pin);
- return ret;
- }
-
- ret = gpio_direction_input(data->gpio_pin);
- if (ret) {
- dev_err(&pdev->dev, "failed to set pin direction\n");
- return -EINVAL;
+ ret = pps_gpio_setup(pdev);
+ if (ret)
+ return -EINVAL;
}
/* IRQ setup */
- ret = gpio_to_irq(data->gpio_pin);
+ ret = gpiod_to_irq(data->gpio_pin);
if (ret < 0) {
dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
return -EINVAL;
@@ -152,6 +220,11 @@ static int pps_gpio_probe(struct platform_device *pdev)
data->info.owner = THIS_MODULE;
snprintf(data->info.name, PPS_MAX_NAME_LEN - 1, "%s.%d",
pdev->name, pdev->id);
+ if (data->echo_pin) {
+ data->info.echo = pps_gpio_echo;
+ data->echo_timeout = msecs_to_jiffies(data->echo_active_ms);
+ timer_setup(&data->echo_timer, pps_gpio_echo_timer_callback, 0);
+ }
/* register PPS source */
pps_default_params = PPS_CAPTUREASSERT | PPS_OFFSETASSERT;
@@ -173,7 +246,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- platform_set_drvdata(pdev, data);
dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n",
data->irq);
@@ -185,6 +257,11 @@ static int pps_gpio_remove(struct platform_device *pdev)
struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
pps_unregister_source(data->pps);
+ if (data->echo_pin) {
+ del_timer_sync(&data->echo_timer);
+ /* reset echo pin in any case */
+ gpiod_set_value(data->echo_pin, 0);
+ }
dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
return 0;
}
@@ -209,4 +286,4 @@ MODULE_AUTHOR("Ricardo Martins <rasm@fe.up.pt>");
MODULE_AUTHOR("James Nuss <jamesnuss@nanometrics.ca>");
MODULE_DESCRIPTION("Use GPIO pin as PPS source");
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
+MODULE_VERSION("1.2.0");
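
The pps-gpio echo support drives the echo GPIO high from the PPS event callback and arms a timer that drops it again after echo-active-ms. A hedged sketch of that mechanism; the echo_demo_* names are hypothetical, and timer_setup() on echo_timer is assumed to have been called during probe.

#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

struct echo_demo {
	struct gpio_desc *echo_pin;
	struct timer_list echo_timer;	/* set up with timer_setup() in probe */
	unsigned long echo_timeout;	/* active time, in jiffies */
};

/* Timer callback: return the echo pin to its inactive state. */
static void echo_demo_timer_cb(struct timer_list *t)
{
	struct echo_demo *d = from_timer(d, t, echo_timer);

	gpiod_set_value(d->echo_pin, 0);
}

/* Called from the PPS event path: assert the echo pin and schedule
 * its deassertion after the configured active time. */
static void echo_demo_pulse(struct echo_demo *d)
{
	gpiod_set_value(d->echo_pin, 1);
	mod_timer(&d->echo_timer, jiffies + d->echo_timeout);
}
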
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 54f8238aac0d..1311b54089be 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -210,6 +210,17 @@ config PWM_IMX27
To compile this driver as a module, choose M here: the module
will be called pwm-imx27.
+config PWM_IMX_TPM
+ tristate "i.MX TPM PWM support"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAVE_CLK && HAS_IOMEM
+ help
+ Generic PWM framework driver for i.MX7ULP TPM module, TPM's full
+ name is Low Power Timer/Pulse Width Modulation Module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-imx-tpm.
+
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
depends on MACH_INGENIC
@@ -467,10 +478,9 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
help
- PWM driver support for the EHRPWM controller found on AM33XX
- TI SOC
+ PWM driver support for the EHRPWM controller found on TI SOCs
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 448825e892bc..c368599d36c0 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o
+obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 3149204567f3..3998ebd51db4 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (IS_ENABLED(CONFIG_OF))
of_pwmchip_add(chip);
- pwmchip_sysfs_export(chip);
-
out:
mutex_unlock(&pwm_lock);
+
+ if (!ret)
+ pwmchip_sysfs_export(chip);
+
return ret;
}
EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
@@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip)
unsigned int i;
int ret = 0;
- pwmchip_sysfs_unexport_children(chip);
+ pwmchip_sysfs_unexport(chip);
mutex_lock(&pwm_lock);
@@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
- pwmchip_sysfs_unexport(chip);
-
out:
mutex_unlock(&pwm_lock);
return ret;
@@ -877,6 +877,7 @@ void pwm_put(struct pwm_device *pwm)
if (pwm->chip->ops->free)
pwm->chip->ops->free(pwm->chip, pwm);
+ pwm_set_chip_data(pwm, NULL);
pwm->label = NULL;
module_put(pwm->chip->ops->owner);
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 7c8d6a168ceb..b91c477cc84b 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -84,7 +84,6 @@ static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
- pwm_set_chip_data(pwm, NULL);
kfree(channel);
}
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index bbf10ae02f0e..fa168581e6b8 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -35,7 +35,7 @@
#include <asm/div64.h>
-#include <mach/platform.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
+#include <linux/soc/cirrus/ep93xx.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
#define EP93XX_PWMx_TERM_COUNT 0x00
#define EP93XX_PWMx_DUTY_CYCLE 0x04
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 815f5333bb8f..1cc5fbe1e1d3 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -123,7 +123,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
} else if (mul <= max_timebase * 512) {
div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
timebase = DIV_ROUND_UP(mul, 512);
- } else if (mul > max_timebase * 512) {
+ } else {
dev_err(chip->dev,
"failed to configure timebase steps/divider value\n");
return -EINVAL;
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
new file mode 100644
index 000000000000..e8385c1cf342
--- /dev/null
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2019 NXP.
+ *
+ * Limitations:
+ * - The TPM counter and period counter are shared between
+ * multiple channels, so all channels should use same period
+ * settings.
+ * - Changes to polarity cannot be latched at the time of the
+ * next period start.
+ * - Changing period and duty cycle together isn't atomic,
+ * with the wrong timing it might happen that a period is
+ * produced with old duty cycle but new period settings.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define PWM_IMX_TPM_PARAM 0x4
+#define PWM_IMX_TPM_GLOBAL 0x8
+#define PWM_IMX_TPM_SC 0x10
+#define PWM_IMX_TPM_CNT 0x14
+#define PWM_IMX_TPM_MOD 0x18
+#define PWM_IMX_TPM_CnSC(n) (0x20 + (n) * 0x8)
+#define PWM_IMX_TPM_CnV(n) (0x24 + (n) * 0x8)
+
+#define PWM_IMX_TPM_PARAM_CHAN GENMASK(7, 0)
+
+#define PWM_IMX_TPM_SC_PS GENMASK(2, 0)
+#define PWM_IMX_TPM_SC_CMOD GENMASK(4, 3)
+#define PWM_IMX_TPM_SC_CMOD_INC_EVERY_CLK FIELD_PREP(PWM_IMX_TPM_SC_CMOD, 1)
+#define PWM_IMX_TPM_SC_CPWMS BIT(5)
+
+#define PWM_IMX_TPM_CnSC_CHF BIT(7)
+#define PWM_IMX_TPM_CnSC_MSB BIT(5)
+#define PWM_IMX_TPM_CnSC_MSA BIT(4)
+
+/*
+ * The reference manual describes this field as two separate bits. The
+ * semantic of the two bits isn't orthogonal though, so they are treated
+ * together as a 2-bit field here.
+ */
+#define PWM_IMX_TPM_CnSC_ELS GENMASK(3, 2)
+#define PWM_IMX_TPM_CnSC_ELS_INVERSED FIELD_PREP(PWM_IMX_TPM_CnSC_ELS, 1)
+#define PWM_IMX_TPM_CnSC_ELS_NORMAL FIELD_PREP(PWM_IMX_TPM_CnSC_ELS, 2)
+
+
+#define PWM_IMX_TPM_MOD_WIDTH 16
+#define PWM_IMX_TPM_MOD_MOD GENMASK(PWM_IMX_TPM_MOD_WIDTH - 1, 0)
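An illustrative sketch (not part of the patch) of how the 2-bit ELS field described above is encoded and decoded with the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>:

	u32 cnsc, els;

	/* program normal polarity: MSB set, ELS[1:0] = 2b10 */
	cnsc = PWM_IMX_TPM_CnSC_MSB | PWM_IMX_TPM_CnSC_ELS_NORMAL;

	/* FIELD_GET() masks and shifts the field back down: els is 2 here,
	 * and would be 1 (2b01) for inversed polarity */
	els = FIELD_GET(PWM_IMX_TPM_CnSC_ELS, cnsc);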
+
+struct imx_tpm_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+ struct mutex lock;
+ u32 user_count;
+ u32 enable_count;
+ u32 real_period;
+};
+
+struct imx_tpm_pwm_param {
+ u8 prescale;
+ u32 mod;
+ u32 val;
+};
+
+static inline struct imx_tpm_pwm_chip *
+to_imx_tpm_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct imx_tpm_pwm_chip, chip);
+}
+
+/*
+ * For a given pwm_state *state requested by a consumer, this function
+ * determines the pwm_state *real_state that the hardware will actually
+ * implement and the necessary register values (in *p) to achieve it.
+ */
+static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
+ struct imx_tpm_pwm_param *p,
+ struct pwm_state *real_state,
+ struct pwm_state *state)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ u32 rate, prescale, period_count, clock_unit;
+ u64 tmp;
+
+ rate = clk_get_rate(tpm->clk);
+ tmp = (u64)state->period * rate;
+ clock_unit = DIV_ROUND_CLOSEST_ULL(tmp, NSEC_PER_SEC);
+ if (clock_unit <= PWM_IMX_TPM_MOD_MOD)
+ prescale = 0;
+ else
+ prescale = ilog2(clock_unit) + 1 - PWM_IMX_TPM_MOD_WIDTH;
+
+ if ((!FIELD_FIT(PWM_IMX_TPM_SC_PS, prescale)))
+ return -ERANGE;
+ p->prescale = prescale;
+
+ period_count = (clock_unit + ((1 << prescale) >> 1)) >> prescale;
+ p->mod = period_count;
+
+ /* calculate real period HW can support */
+ tmp = (u64)period_count << prescale;
+ tmp *= NSEC_PER_SEC;
+ real_state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate);
+
+	/*
+	 * If the channel is disabled, force the duty cycle to 0 so
+	 * that the output pin is guaranteed to be inactive.
+	 */
+ if (!state->enabled)
+ real_state->duty_cycle = 0;
+ else
+ real_state->duty_cycle = state->duty_cycle;
+
+ tmp = (u64)p->mod * real_state->duty_cycle;
+ p->val = DIV_ROUND_CLOSEST_ULL(tmp, real_state->period);
+
+ real_state->polarity = state->polarity;
+ real_state->enabled = state->enabled;
+
+ return 0;
+}
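A worked example of the arithmetic above, using assumed values that are not taken from the patch (24 MHz TPM clock, 10 ms requested period):

	clock_unit   = 10000000 ns * 24000000 Hz / 1000000000 = 240000 counter ticks
	240000 > 0xffff, so prescale = ilog2(240000) + 1 - 16 = 17 + 1 - 16 = 2
	period_count = (240000 + ((1 << 2) >> 1)) >> 2 = 60000, which fits the 16-bit MOD field
	real period  = (60000 << 2) * 1000000000 / 24000000 = 10 ms, i.e. exact in this case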
+
+static void pwm_imx_tpm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ u32 rate, val, prescale;
+ u64 tmp;
+
+ /* get period */
+ state->period = tpm->real_period;
+
+ /* get duty cycle */
+ rate = clk_get_rate(tpm->clk);
+ val = readl(tpm->base + PWM_IMX_TPM_SC);
+ prescale = FIELD_GET(PWM_IMX_TPM_SC_PS, val);
+ tmp = readl(tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm));
+ tmp = (tmp << prescale) * NSEC_PER_SEC;
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
+
+ /* get polarity */
+ val = readl(tpm->base + PWM_IMX_TPM_CnSC(pwm->hwpwm));
+ if ((val & PWM_IMX_TPM_CnSC_ELS) == PWM_IMX_TPM_CnSC_ELS_INVERSED)
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ /*
+ * Assume reserved values (2b00 and 2b11) to yield
+ * normal polarity.
+ */
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ /* get channel status */
+ state->enabled = FIELD_GET(PWM_IMX_TPM_CnSC_ELS, val) ? true : false;
+}
+
+/* this function must be called with tpm->lock held */
+static int pwm_imx_tpm_apply_hw(struct pwm_chip *chip,
+ struct imx_tpm_pwm_param *p,
+ struct pwm_state *state,
+ struct pwm_device *pwm)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ bool period_update = false;
+ bool duty_update = false;
+ u32 val, cmod, cur_prescale;
+ unsigned long timeout;
+ struct pwm_state c;
+
+ if (state->period != tpm->real_period) {
+ /*
+ * TPM counter is shared by multiple channels, so
+ * prescale and period can NOT be modified when
+ * there are multiple channels in use with different
+ * period settings.
+ */
+ if (tpm->user_count > 1)
+ return -EBUSY;
+
+ val = readl(tpm->base + PWM_IMX_TPM_SC);
+ cmod = FIELD_GET(PWM_IMX_TPM_SC_CMOD, val);
+ cur_prescale = FIELD_GET(PWM_IMX_TPM_SC_PS, val);
+ if (cmod && cur_prescale != p->prescale)
+ return -EBUSY;
+
+ /* set TPM counter prescale */
+ val &= ~PWM_IMX_TPM_SC_PS;
+ val |= FIELD_PREP(PWM_IMX_TPM_SC_PS, p->prescale);
+ writel(val, tpm->base + PWM_IMX_TPM_SC);
+
+ /*
+ * set period count:
+ * if the PWM is disabled (CMOD[1:0] = 2b00), then MOD register
+ * is updated when MOD register is written.
+ *
+ * if the PWM is enabled (CMOD[1:0] ≠ 2b00), the period length
+ * is latched into hardware when the next period starts.
+ */
+ writel(p->mod, tpm->base + PWM_IMX_TPM_MOD);
+ tpm->real_period = state->period;
+ period_update = true;
+ }
+
+ pwm_imx_tpm_get_state(chip, pwm, &c);
+
+ /* polarity is NOT allowed to be changed if PWM is active */
+ if (c.enabled && c.polarity != state->polarity)
+ return -EBUSY;
+
+ if (state->duty_cycle != c.duty_cycle) {
+ /*
+ * set channel value:
+ * if the PWM is disabled (CMOD[1:0] = 2b00), then CnV register
+ * is updated when CnV register is written.
+ *
+ * if the PWM is enabled (CMOD[1:0] ≠ 2b00), the duty length
+ * is latched into hardware when the next period starts.
+ */
+ writel(p->val, tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm));
+ duty_update = true;
+ }
+
+ /* make sure MOD & CnV registers are updated */
+ if (period_update || duty_update) {
+ timeout = jiffies + msecs_to_jiffies(tpm->real_period /
+ NSEC_PER_MSEC + 1);
+ while (readl(tpm->base + PWM_IMX_TPM_MOD) != p->mod
+ || readl(tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm))
+ != p->val) {
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+ cpu_relax();
+ }
+ }
+
+	/*
+	 * The polarity (ELS) setting enables or disables the output
+	 * immediately, so when the channel is disabled make sure
+	 * MSA/MSB/ELS are all cleared, which means the channel is off.
+	 */
+ val = readl(tpm->base + PWM_IMX_TPM_CnSC(pwm->hwpwm));
+ val &= ~(PWM_IMX_TPM_CnSC_ELS | PWM_IMX_TPM_CnSC_MSA |
+ PWM_IMX_TPM_CnSC_MSB);
+ if (state->enabled) {
+ /*
+ * set polarity (for edge-aligned PWM modes)
+ *
+ * ELS[1:0] = 2b10 yields normal polarity behaviour,
+ * ELS[1:0] = 2b01 yields inversed polarity.
+ * The other values are reserved.
+ */
+ val |= PWM_IMX_TPM_CnSC_MSB;
+ val |= (state->polarity == PWM_POLARITY_NORMAL) ?
+ PWM_IMX_TPM_CnSC_ELS_NORMAL :
+ PWM_IMX_TPM_CnSC_ELS_INVERSED;
+ }
+ writel(val, tpm->base + PWM_IMX_TPM_CnSC(pwm->hwpwm));
+
+ /* control the counter status */
+ if (state->enabled != c.enabled) {
+ val = readl(tpm->base + PWM_IMX_TPM_SC);
+ if (state->enabled) {
+ if (++tpm->enable_count == 1)
+ val |= PWM_IMX_TPM_SC_CMOD_INC_EVERY_CLK;
+ } else {
+ if (--tpm->enable_count == 0)
+ val &= ~PWM_IMX_TPM_SC_CMOD;
+ }
+ writel(val, tpm->base + PWM_IMX_TPM_SC);
+ }
+
+ return 0;
+}
+
+static int pwm_imx_tpm_apply(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ struct imx_tpm_pwm_param param;
+ struct pwm_state real_state;
+ int ret;
+
+ ret = pwm_imx_tpm_round_state(chip, &param, &real_state, state);
+ if (ret)
+ return ret;
+
+ mutex_lock(&tpm->lock);
+ ret = pwm_imx_tpm_apply_hw(chip, &param, &real_state, pwm);
+ mutex_unlock(&tpm->lock);
+
+ return ret;
+}
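For context, a hedged consumer-side sketch of how a client driver reaches the ->apply() callback above; mypwm is a hypothetical pwm_device that would normally come from devm_pwm_get():

	struct pwm_state st;

	pwm_init_state(mypwm, &st);		/* seed from current state and DT args */
	st.period = 10 * NSEC_PER_MSEC;
	st.duty_cycle = st.period / 2;		/* 50% duty */
	st.enabled = true;
	pwm_apply_state(mypwm, &st);		/* ends up in pwm_imx_tpm_apply() */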
+
+static int pwm_imx_tpm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+
+ mutex_lock(&tpm->lock);
+ tpm->user_count++;
+ mutex_unlock(&tpm->lock);
+
+ return 0;
+}
+
+static void pwm_imx_tpm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+
+ mutex_lock(&tpm->lock);
+ tpm->user_count--;
+ mutex_unlock(&tpm->lock);
+}
+
+static const struct pwm_ops imx_tpm_pwm_ops = {
+ .request = pwm_imx_tpm_request,
+ .free = pwm_imx_tpm_free,
+ .get_state = pwm_imx_tpm_get_state,
+ .apply = pwm_imx_tpm_apply,
+ .owner = THIS_MODULE,
+};
+
+static int pwm_imx_tpm_probe(struct platform_device *pdev)
+{
+ struct imx_tpm_pwm_chip *tpm;
+ int ret;
+ u32 val;
+
+ tpm = devm_kzalloc(&pdev->dev, sizeof(*tpm), GFP_KERNEL);
+ if (!tpm)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tpm);
+
+ tpm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tpm->base))
+ return PTR_ERR(tpm->base);
+
+ tpm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tpm->clk)) {
+ ret = PTR_ERR(tpm->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get PWM clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tpm->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to prepare or enable clock: %d\n", ret);
+ return ret;
+ }
+
+ tpm->chip.dev = &pdev->dev;
+ tpm->chip.ops = &imx_tpm_pwm_ops;
+ tpm->chip.base = -1;
+ tpm->chip.of_xlate = of_pwm_xlate_with_flags;
+ tpm->chip.of_pwm_n_cells = 3;
+
+ /* get number of channels */
+ val = readl(tpm->base + PWM_IMX_TPM_PARAM);
+ tpm->chip.npwm = FIELD_GET(PWM_IMX_TPM_PARAM_CHAN, val);
+
+ mutex_init(&tpm->lock);
+
+ ret = pwmchip_add(&tpm->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(tpm->clk);
+ }
+
+ return ret;
+}
+
+static int pwm_imx_tpm_remove(struct platform_device *pdev)
+{
+ struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
+ int ret = pwmchip_remove(&tpm->chip);
+
+ clk_disable_unprepare(tpm->clk);
+
+ return ret;
+}
+
+static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
+{
+ struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
+
+ if (tpm->enable_count > 0)
+ return -EBUSY;
+
+ clk_disable_unprepare(tpm->clk);
+
+ return 0;
+}
+
+static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
+{
+ struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(tpm->clk);
+ if (ret)
+ dev_err(dev,
+ "failed to prepare or enable clock: %d\n",
+ ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(imx_tpm_pwm_pm,
+ pwm_imx_tpm_suspend, pwm_imx_tpm_resume);
+
+static const struct of_device_id imx_tpm_pwm_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_tpm_pwm_dt_ids);
+
+static struct platform_driver imx_tpm_pwm_driver = {
+ .driver = {
+ .name = "imx7ulp-tpm-pwm",
+ .of_match_table = imx_tpm_pwm_dt_ids,
+ .pm = &imx_tpm_pwm_pm,
+ },
+ .probe = pwm_imx_tpm_probe,
+ .remove = pwm_imx_tpm_remove,
+};
+module_platform_driver(imx_tpm_pwm_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("i.MX TPM PWM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 806130654211..434a351fb626 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -291,7 +291,6 @@ MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
static int pwm_imx27_probe(struct platform_device *pdev)
{
struct pwm_imx27_chip *imx;
- struct resource *r;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (imx == NULL)
@@ -326,8 +325,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
imx->chip.of_xlate = of_pwm_xlate_with_flags;
imx->chip.of_pwm_n_cells = 3;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 4ae5d774443e..fb5a369b1a8d 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -111,6 +111,10 @@ struct meson_pwm {
const struct meson_pwm_data *data;
void __iomem *base;
u8 inverter_mask;
+ /*
+ * Protects register (write) access to the REG_MISC_AB register
+ * that is shared between the two PWMs.
+ */
spinlock_t lock;
};
@@ -184,7 +188,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
do_div(fin_ps, fin_freq);
/* Calc pre_div with the period */
- for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
+ for (pre_div = 0; pre_div <= MISC_CLK_DIV_MASK; pre_div++) {
cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000,
fin_ps * (pre_div + 1));
dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n",
@@ -193,7 +197,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
break;
}
- if (pre_div == MISC_CLK_DIV_MASK) {
+ if (pre_div > MISC_CLK_DIV_MASK) {
dev_err(meson->chip.dev, "unable to get period pre_div\n");
return -EINVAL;
}
@@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
{
u32 value, clk_shift, clk_enable, enable;
unsigned int offset;
+ unsigned long flags;
switch (id) {
case 0:
@@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~(MISC_CLK_DIV_MASK << clk_shift);
value |= channel->pre_div << clk_shift;
@@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
value = readl(meson->base + REG_MISC_AB);
value |= enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
{
u32 value, enable;
+ unsigned long flags;
switch (id) {
case 0:
@@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -296,29 +310,21 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
- unsigned long flags;
int err = 0;
if (!state)
return -EINVAL;
- spin_lock_irqsave(&meson->lock, flags);
-
if (!state->enabled) {
meson_pwm_disable(meson, pwm->hwpwm);
channel->state.enabled = false;
- goto unlock;
+ return 0;
}
if (state->period != channel->state.period ||
state->duty_cycle != channel->state.duty_cycle ||
state->polarity != channel->state.polarity) {
- if (channel->state.enabled) {
- meson_pwm_disable(meson, pwm->hwpwm);
- channel->state.enabled = false;
- }
-
if (state->polarity != channel->state.polarity) {
if (state->polarity == PWM_POLARITY_NORMAL)
meson->inverter_mask |= BIT(pwm->hwpwm);
@@ -329,7 +335,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = meson_pwm_calc(meson, channel, pwm->hwpwm,
state->duty_cycle, state->period);
if (err < 0)
- goto unlock;
+ return err;
channel->state.polarity = state->polarity;
channel->state.period = state->period;
@@ -341,9 +347,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->state.enabled = true;
}
-unlock:
- spin_unlock_irqrestore(&meson->lock, flags);
- return err;
+ return 0;
}
static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -429,6 +433,24 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
.num_parents = ARRAY_SIZE(pwm_axg_ao_parent_names),
};
+static const char * const pwm_g12a_ao_cd_parent_names[] = {
+ "aoclk81", "xtal",
+};
+
+static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
+ .parent_names = pwm_g12a_ao_cd_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_g12a_ao_cd_parent_names),
+};
+
+static const char * const pwm_g12a_ee_parent_names[] = {
+ "xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
+};
+
+static const struct meson_pwm_data pwm_g12a_ee_data = {
+ .parent_names = pwm_g12a_ee_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_g12a_ee_parent_names),
+};
+
static const struct of_device_id meson_pwm_matches[] = {
{
.compatible = "amlogic,meson8b-pwm",
@@ -450,6 +472,18 @@ static const struct of_device_id meson_pwm_matches[] = {
.compatible = "amlogic,meson-axg-ao-pwm",
.data = &pwm_axg_ao_data
},
+ {
+ .compatible = "amlogic,meson-g12a-ee-pwm",
+ .data = &pwm_g12a_ee_data
+ },
+ {
+ .compatible = "amlogic,meson-g12a-ao-pwm-ab",
+ .data = &pwm_axg_ao_data
+ },
+ {
+ .compatible = "amlogic,meson-g12a-ao-pwm-cd",
+ .data = &pwm_g12a_ao_cd_data
+ },
{},
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index a7eaf962a95b..567f5e2771c4 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -176,7 +176,6 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
pm_runtime_put(pca->chip.dev);
mutex_lock(&pca->lock);
pwm = &pca->chip.pwms[offset];
- pwm_set_chip_data(pwm, NULL);
mutex_unlock(&pca->lock);
}
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 062f2cfc45ec..6674e1e80175 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -226,7 +226,7 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
return -EINVAL;
}
- our_chan = devm_kzalloc(chip->dev, sizeof(*our_chan), GFP_KERNEL);
+ our_chan = kzalloc(sizeof(*our_chan), GFP_KERNEL);
if (!our_chan)
return -ENOMEM;
@@ -237,8 +237,7 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- devm_kfree(chip->dev, pwm_get_chip_data(pwm));
- pwm_set_chip_data(pwm, NULL);
+ kfree(pwm_get_chip_data(pwm));
}
static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index f7b8a86fa5c5..ad4a40c0f27c 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
}
/* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+ AQSFRC_RLDCSF_ZRO);
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index ceb233dd6048..719f8fada0a7 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -398,7 +398,7 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
/*
* If device_create() fails the pwm_chip is still usable by
- * the kernel its just not exported.
+ * the kernel it's just not exported.
*/
parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
"pwmchip%d", chip->base);
@@ -411,19 +411,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
struct device *parent;
-
- parent = class_find_device(&pwm_class, NULL, chip,
- pwmchip_sysfs_match);
- if (parent) {
- /* for class_find_device() */
- put_device(parent);
- device_unregister(parent);
- }
-}
-
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
- struct device *parent;
unsigned int i;
parent = class_find_device(&pwm_class, NULL, chip,
@@ -439,6 +426,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
}
put_device(parent);
+ device_unregister(parent);
}
static int __init pwm_sysfs_init(void)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 1e1f42e210a0..4a4a75fa26d5 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -868,7 +868,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
pinned = get_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages, dir == DMA_FROM_DEVICE, page_list);
+ nr_pages,
+ dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+ page_list);
if (pinned != nr_pages) {
if (pinned < 0) {
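This call-site change, like the similar ones in oradax.c and st.c further down, follows the tree-wide get_user_pages_fast() API change that replaced the old int `write` argument with a gup_flags word. A hedged sketch of the conversion pattern on a hypothetical caller:

	/* before: int write argument */
	pinned = get_user_pages_fast(addr, nr_pages, is_write, pages);

	/* after: gup_flags argument */
	pinned = get_user_pages_fast(addr, nr_pages,
				     is_write ? FOLL_WRITE : 0, pages);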
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index cf45829585cb..b29fc258eeba 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2147,6 +2147,14 @@ static int riocm_add_mport(struct device *dev,
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ riocm_error("failed to allocate IBMBOX_%d on %s",
+ cmbox, mport->name);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 2ef1f13aa47b..99e75d92dada 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -79,11 +79,11 @@ static int zynqmp_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
-
priv->eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!priv->eemi_ops)
- return -ENXIO;
+ if (IS_ERR(priv->eemi_ops))
+ return PTR_ERR(priv->eemi_ops);
+
+ platform_set_drvdata(pdev, priv);
priv->rcdev.ops = &zynqmp_reset_ops;
priv->rcdev.owner = THIS_MODULE;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 32994b0dd139..a2941c875a06 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -403,15 +403,12 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
static struct omap_rtc *omap_rtc_power_off_rtc;
-/*
- * omap_rtc_poweroff: RTC-controlled power off
- *
- * The RTC can be used to control an external PMIC via the pmic_power_en pin,
- * which can be configured to transition to OFF on ALARM2 events.
- *
- * Called with local interrupts disabled.
+/**
+ * omap_rtc_power_off_program: Set the pmic power off sequence. The RTC
+ * generates pmic_pwr_enable control, which can be used to control an external
+ * PMIC.
*/
-static void omap_rtc_power_off(void)
+int omap_rtc_power_off_program(struct device *dev)
{
struct omap_rtc *rtc = omap_rtc_power_off_rtc;
struct rtc_time tm;
@@ -425,6 +422,9 @@ static void omap_rtc_power_off(void)
rtc_writel(rtc, OMAP_RTC_PMIC_REG, val | OMAP_RTC_PMIC_POWER_EN_EN);
again:
+ /* Clear any existing ALARM2 event */
+ rtc_writel(rtc, OMAP_RTC_STATUS_REG, OMAP_RTC_STATUS_ALARM2);
+
/* set alarm one second from now */
omap_rtc_read_time_raw(rtc, &tm);
seconds = tm.tm_sec;
@@ -461,6 +461,39 @@ again:
rtc->type->lock(rtc);
+ return 0;
+}
+EXPORT_SYMBOL(omap_rtc_power_off_program);
+
+/*
+ * omap_rtc_poweroff: RTC-controlled power off
+ *
+ * The RTC can be used to control an external PMIC via the pmic_power_en pin,
+ * which can be configured to transition to OFF on ALARM2 events.
+ *
+ * Notes:
+ * The one-second alarm offset is the shortest offset possible as the alarm
+ * registers must be set before the next timer update and the offset
+ * calculation is too heavy for everything to be done within a single access
+ * period (~15 us).
+ *
+ * Called with local interrupts disabled.
+ */
+static void omap_rtc_power_off(void)
+{
+ struct rtc_device *rtc = omap_rtc_power_off_rtc->rtc;
+ u32 val;
+
+ omap_rtc_power_off_program(rtc->dev.parent);
+
+ /* Set PMIC power enable and EXT_WAKEUP in case PB power on is used */
+ omap_rtc_power_off_rtc->type->unlock(omap_rtc_power_off_rtc);
+ val = rtc_readl(omap_rtc_power_off_rtc, OMAP_RTC_PMIC_REG);
+ val |= OMAP_RTC_PMIC_POWER_EN_EN | OMAP_RTC_PMIC_EXT_WKUP_POL(0) |
+ OMAP_RTC_PMIC_EXT_WKUP_EN(0);
+ rtc_writel(omap_rtc_power_off_rtc, OMAP_RTC_PMIC_REG, val);
+ omap_rtc_power_off_rtc->type->lock(omap_rtc_power_off_rtc);
+
/*
* Wait for alarm to trigger (within one second) and external PMIC to
* power off the system. Add a 500 ms margin for external latencies
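For context, a hedged sketch (not shown in this hunk; the exact probe-time code in the driver may differ) of how an RTC-controlled poweroff handler like omap_rtc_power_off() is typically wired up:

	/* assumed probe-time wiring, illustrative only */
	if (!pm_power_off) {
		omap_rtc_power_off_rtc = rtc;
		pm_power_off = omap_rtc_power_off;
	}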
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index e62bda0cb53e..8ad4c4e6d557 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -21,8 +21,20 @@
#define EC_CMOS_TOD_WRITE 0x02
#define EC_CMOS_TOD_READ 0x08
+/* Message sent to the EC to request the current time. */
+struct ec_rtc_read_request {
+ u8 command;
+ u8 reserved;
+ u8 param;
+} __packed;
+static struct ec_rtc_read_request read_rq = {
+ .command = EC_COMMAND_CMOS,
+ .param = EC_CMOS_TOD_READ,
+};
+
/**
- * struct ec_rtc_read - Format of RTC returned by EC.
+ * struct ec_rtc_read_response - Format of RTC returned by EC.
+ * @reserved: Unused byte
* @second: Second value (0..59)
* @minute: Minute value (0..59)
* @hour: Hour value (0..23)
@@ -33,7 +45,8 @@
*
* All values are presented in binary (not BCD).
*/
-struct ec_rtc_read {
+struct ec_rtc_read_response {
+ u8 reserved;
u8 second;
u8 minute;
u8 hour;
@@ -44,8 +57,10 @@ struct ec_rtc_read {
} __packed;
/**
- * struct ec_rtc_write - Format of RTC sent to the EC.
- * @param: EC_CMOS_TOD_WRITE
+ * struct ec_rtc_write_request - Format of RTC sent to the EC.
+ * @command: Always EC_COMMAND_CMOS
+ * @reserved: Unused byte
+ * @param: Always EC_CMOS_TOD_WRITE
* @century: Century value (full year / 100)
* @year: Year value (full year % 100)
* @month: Month value (1..12)
@@ -57,7 +72,9 @@ struct ec_rtc_read {
*
* All values are presented in BCD.
*/
-struct ec_rtc_write {
+struct ec_rtc_write_request {
+ u8 command;
+ u8 reserved;
u8 param;
u8 century;
u8 year;
@@ -72,19 +89,17 @@ struct ec_rtc_write {
static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
{
struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
- u8 param = EC_CMOS_TOD_READ;
- struct ec_rtc_read rtc;
- struct wilco_ec_message msg = {
- .type = WILCO_EC_MSG_LEGACY,
- .flags = WILCO_EC_FLAG_RAW_RESPONSE,
- .command = EC_COMMAND_CMOS,
- .request_data = &param,
- .request_size = sizeof(param),
- .response_data = &rtc,
- .response_size = sizeof(rtc),
- };
+ struct ec_rtc_read_response rtc;
+ struct wilco_ec_message msg;
int ret;
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &read_rq;
+ msg.request_size = sizeof(read_rq);
+ msg.response_data = &rtc;
+ msg.response_size = sizeof(rtc);
+
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
@@ -106,14 +121,8 @@ static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
{
struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
- struct ec_rtc_write rtc;
- struct wilco_ec_message msg = {
- .type = WILCO_EC_MSG_LEGACY,
- .flags = WILCO_EC_FLAG_RAW_RESPONSE,
- .command = EC_COMMAND_CMOS,
- .request_data = &rtc,
- .request_size = sizeof(rtc),
- };
+ struct ec_rtc_write_request rtc;
+ struct wilco_ec_message msg;
int year = tm->tm_year + 1900;
/*
* Convert from 0=Sunday to 0=Saturday for the EC
@@ -123,6 +132,7 @@ static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
int wday = tm->tm_wday == 6 ? 0 : tm->tm_wday + 1;
int ret;
+ rtc.command = EC_COMMAND_CMOS;
rtc.param = EC_CMOS_TOD_WRITE;
rtc.century = bin2bcd(year / 100);
rtc.year = bin2bcd(year % 100);
@@ -133,6 +143,11 @@ static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
rtc.second = bin2bcd(tm->tm_sec);
rtc.weekday = bin2bcd(wday);
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = &rtc;
+ msg.request_size = sizeof(rtc);
+
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index f89f9d02e788..c09039eea707 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3827,7 +3827,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
- "raw write not track aligned (%lu,%lu) req %p",
+ "raw write not track aligned (%llu,%llu) req %p",
start_padding_sectors, end_padding_sectors, req);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index cfce255521ac..7b7620de2acd 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -205,17 +205,22 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
- int i;
+ int i = 1;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
- for (i = 1; i < count; i++) {
+ for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* merge PENDING into EMPTY: */
@@ -228,6 +233,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (q->slsb.val[bufnr] != __state)
break;
}
+
+out:
*state = __state;
return i;
}
@@ -382,7 +389,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0, 0);
+ return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -719,11 +726,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
multicast_outbound(q)))
qdio_siga_sync_q(q);
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
index e331cd97e83b..882ee538ca30 100644
--- a/drivers/s390/cio/trace.c
+++ b/drivers/s390/cio/trace.c
@@ -21,5 +21,4 @@ EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
-EXPORT_TRACEPOINT_SYMBOL(s390_cio_rchp);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 0ebb29b6fd6d..4803139bce14 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -274,29 +274,6 @@ DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
TP_ARGS(schid, cc)
);
-/**
- * s390_cio_rchp - Reset Channel Path (RCHP) instruction was performed
- * @chpid: Channel-Path Identifier
- * @cc: Condition code
- */
-TRACE_EVENT(s390_cio_rchp,
- TP_PROTO(struct chp_id chpid, int cc),
- TP_ARGS(chpid, cc),
- TP_STRUCT__entry(
- __field(u8, cssid)
- __field(u8, id)
- __field(int, cc)
- ),
- TP_fast_assign(
- __entry->cssid = chpid.cssid;
- __entry->id = chpid.id;
- __entry->cc = cc;
- ),
- TP_printk("chpid=%x.%02x cc=%d", __entry->cssid, __entry->id,
- __entry->cc
- )
-);
-
#define CHSC_MAX_REQUEST_LEN 64
#define CHSC_MAX_RESPONSE_LEN 64
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 991420caa4f2..6a3076881321 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -66,6 +66,7 @@ struct virtio_ccw_device {
bool device_lost;
unsigned int config_ready;
void *airq_info;
+ u64 dma_mask;
};
struct vq_info_block_legacy {
@@ -108,7 +109,6 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
- void *queue;
union {
struct vq_info_block s;
struct vq_info_block_legacy l;
@@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
struct virtio_ccw_vq_info *info = vq->priv;
unsigned long flags;
- unsigned long size;
int ret;
unsigned int index = vq->index;
@@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- free_pages_exact(info->queue, size);
kfree(info->info_block);
kfree(info);
}
@@ -494,8 +491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
int err;
struct virtqueue *vq = NULL;
struct virtio_ccw_vq_info *info;
- unsigned long size = 0; /* silence the compiler */
+ u64 queue;
unsigned long flags;
+ bool may_reduce;
/* Allocate queue. */
info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
@@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = info->num;
goto out_err;
}
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
- if (info->queue == NULL) {
- dev_warn(&vcdev->cdev->dev, "no queue\n");
- err = -ENOMEM;
- goto out_err;
- }
+ may_reduce = vcdev->revision > 0;
+ vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
+ vdev, true, may_reduce, ctx,
+ virtio_ccw_kvm_notify, callback, name);
- vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
- true, ctx, info->queue, virtio_ccw_kvm_notify,
- callback, name);
if (!vq) {
/* For now, we fail if we can't get the requested size. */
dev_warn(&vcdev->cdev->dev, "no vq\n");
err = -ENOMEM;
goto out_err;
}
+	/* the vring size may have been reduced by the core */
+ info->num = virtqueue_get_vring_size(vq);
/* Register it with the host. */
+ queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = (__u64)info->queue;
+ info->info_block->l.queue = queue;
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = (__u64)info->queue;
+ info->info_block->s.desc = queue;
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+ info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
+ info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
@@ -572,8 +567,6 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- if (info->queue)
- free_pages_exact(info->queue, size);
kfree(info->info_block);
}
kfree(info);
@@ -780,12 +773,8 @@ out_free:
static void ccw_transport_features(struct virtio_device *vdev)
{
/*
- * Packed ring isn't enabled on virtio_ccw for now,
- * because virtio_ccw uses some legacy accessors,
- * e.g. virtqueue_get_avail() and virtqueue_get_used()
- * which aren't available in packed ring currently.
+ * Currently nothing to do here.
*/
- __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
@@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
+
+ vcdev->vdev.dev.parent = &cdev->dev;
+ cdev->dev.dma_mask = &vcdev->dma_mask;
+ /* we are fine with common virtio infrastructure using 64 bit DMA */
+ ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
+ goto out_free;
+ }
+
vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
GFP_DMA | GFP_KERNEL);
if (!vcdev->config_block) {
@@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
- vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
vcdev->cdev = cdev;
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index acd9ba40eabe..8090dc9a1514 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -437,7 +437,7 @@ static int dax_lock_page(void *va, struct page **p)
dax_dbg("uva %p", va);
- ret = get_user_pages_fast((unsigned long)va, 1, 1, p);
+ ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
if (ret == 1) {
dax_dbg("locked page %p, for VA %p", *p, va);
return 0;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 19c022e66d63..3c6a18ad9a87 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,7 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */
- res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
+ res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+ pages);
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index e649ceaaa410..87d69e7471f9 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "internals.h"
static void __iomem *uimask;
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c07b4a85253f..75bdbb2c5140 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -2,10 +2,12 @@ menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/actions/Kconfig"
source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
+source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 90b686e586c6..524ecdc2a9bb 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
+obj-$(CONFIG_SOC_ASPEED) += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
@@ -11,6 +12,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-$(CONFIG_ARCH_MXC) += imx/
+obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
obj-y += amlogic/
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 6289965c42e9..511b6856225d 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -26,6 +27,7 @@
#define HHI_MEM_PD_REG0 (0x40 << 2)
#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
+#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
struct meson_gx_pwrc_vpu {
struct generic_pm_domain genpd;
@@ -54,12 +56,55 @@ static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
/* Power Down Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x2 << i, 0x3 << i);
+ 0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x2 << i, 0x3 << i);
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), BIT(i));
+ udelay(5);
+ }
+ udelay(20);
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
+
+ msleep(20);
+
+ clk_disable_unprepare(pd->vpu_clk);
+ clk_disable_unprepare(pd->vapb_clk);
+
+ return 0;
+}
+
+static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
+ udelay(20);
+
+ /* Power Down Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
+ 0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 8; i < 16; i++) {
@@ -108,13 +153,67 @@ static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
/* Power Up Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x2 << i, 0);
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), 0);
+ udelay(5);
+ }
+ udelay(20);
+
+ ret = reset_control_assert(pd->rstc);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, 0);
+
+ ret = reset_control_deassert(pd->rstc);
+ if (ret)
+ return ret;
+
+ ret = meson_gx_pwrc_vpu_setup_clk(pd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int ret;
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, 0);
+ udelay(20);
+
+ /* Power Up Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x2 << i, 0);
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
+ 0x3 << i, 0);
udelay(5);
}
@@ -160,15 +259,37 @@ static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
},
};
+static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = {
+ .genpd = {
+ .name = "vpu_hdmi",
+ .power_off = meson_g12a_pwrc_vpu_power_off,
+ .power_on = meson_g12a_pwrc_vpu_power_on,
+ },
+};
+
static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
{
+ const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
+ struct meson_gx_pwrc_vpu *vpu_pd;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
bool powered_off;
int ret;
+ vpu_pd_match = of_device_get_match_data(&pdev->dev);
+ if (!vpu_pd_match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL);
+ if (!vpu_pd)
+ return -ENOMEM;
+
+ memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
+
regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
@@ -201,39 +322,46 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
return PTR_ERR(vapb_clk);
}
- vpu_hdmi_pd.regmap_ao = regmap_ao;
- vpu_hdmi_pd.regmap_hhi = regmap_hhi;
- vpu_hdmi_pd.rstc = rstc;
- vpu_hdmi_pd.vpu_clk = vpu_clk;
- vpu_hdmi_pd.vapb_clk = vapb_clk;
+ vpu_pd->regmap_ao = regmap_ao;
+ vpu_pd->regmap_hhi = regmap_hhi;
+ vpu_pd->rstc = rstc;
+ vpu_pd->vpu_clk = vpu_clk;
+ vpu_pd->vapb_clk = vapb_clk;
+
+ platform_set_drvdata(pdev, vpu_pd);
- powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+ powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
/* If already powered, sync the clock states */
if (!powered_off) {
- ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd);
+ ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd);
if (ret)
return ret;
}
- pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov,
+ pm_genpd_init(&vpu_pd->genpd, &pm_domain_always_on_gov,
powered_off);
return of_genpd_add_provider_simple(pdev->dev.of_node,
- &vpu_hdmi_pd.genpd);
+ &vpu_pd->genpd);
}
static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
{
+ struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev);
bool powered_off;
- powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+ powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
if (!powered_off)
- meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
+ vpu_pd->genpd.power_off(&vpu_pd->genpd);
}
static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
- { .compatible = "amlogic,meson-gx-pwrc-vpu" },
+ { .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd },
+ {
+ .compatible = "amlogic,meson-g12a-pwrc-vpu",
+ .data = &vpu_hdmi_pd_g12a
+ },
{ /* sentinel */ }
};
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 37ea0a1c24c8..bca34954518e 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -37,26 +37,34 @@ static const struct meson_gx_soc_id {
{ "AXG", 0x25 },
{ "GXLX", 0x26 },
{ "TXHD", 0x27 },
+ { "G12A", 0x28 },
+ { "G12B", 0x29 },
};
static const struct meson_gx_package_id {
const char *name;
unsigned int major_id;
unsigned int pack_id;
+ unsigned int pack_mask;
} soc_packages[] = {
- { "S905", 0x1f, 0 },
- { "S905H", 0x1f, 0x13 },
- { "S905M", 0x1f, 0x20 },
- { "S905D", 0x21, 0 },
- { "S905X", 0x21, 0x80 },
- { "S905W", 0x21, 0xa0 },
- { "S905L", 0x21, 0xc0 },
- { "S905M2", 0x21, 0xe0 },
- { "S912", 0x22, 0 },
- { "962X", 0x24, 0x10 },
- { "962E", 0x24, 0x20 },
- { "A113X", 0x25, 0x37 },
- { "A113D", 0x25, 0x22 },
+ { "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */
+ { "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */
+ { "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */
+ { "S905D", 0x21, 0, 0xf0 },
+ { "S905X", 0x21, 0x80, 0xf0 },
+ { "S905W", 0x21, 0xa0, 0xf0 },
+ { "S905L", 0x21, 0xc0, 0xf0 },
+ { "S905M2", 0x21, 0xe0, 0xf0 },
+ { "S805X", 0x21, 0x30, 0xf0 },
+ { "S805Y", 0x21, 0xb0, 0xf0 },
+ { "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */
+ { "962X", 0x24, 0x10, 0xf0 },
+ { "962E", 0x24, 0x20, 0xf0 },
+ { "A113X", 0x25, 0x37, 0xff },
+ { "A113D", 0x25, 0x22, 0xff },
+ { "S905D2", 0x28, 0x10, 0xf0 },
+ { "S905X2", 0x28, 0x40, 0xf0 },
+ { "S922X", 0x29, 0x40, 0xf0 },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
@@ -81,13 +89,14 @@ static inline unsigned int socinfo_to_misc(u32 socinfo)
static const char *socinfo_to_package_id(u32 socinfo)
{
- unsigned int pack = socinfo_to_pack(socinfo) & 0xf0;
+ unsigned int pack = socinfo_to_pack(socinfo);
unsigned int major = socinfo_to_major(socinfo);
int i;
for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) {
if (soc_packages[i].major_id == major &&
- soc_packages[i].pack_id == pack)
+ soc_packages[i].pack_id ==
+ (pack & soc_packages[i].pack_mask))
return soc_packages[i].name;
}
@@ -123,8 +132,10 @@ static int __init meson_gx_socinfo_init(void)
return -ENODEV;
/* check if interface is enabled */
- if (!of_device_is_available(np))
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
/* check if chip-id is available */
if (!of_property_read_bool(np, "amlogic,has-chip-id"))
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
new file mode 100644
index 000000000000..765d10191387
--- /dev/null
+++ b/drivers/soc/aspeed/Kconfig
@@ -0,0 +1,31 @@
+menu "Aspeed SoC drivers"
+
+config SOC_ASPEED
+ def_bool y
+ depends on ARCH_ASPEED || COMPILE_TEST
+
+config ASPEED_LPC_CTRL
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
+ ---help---
+	  Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
+	  ioctl()s. The driver also provides a read/write interface to a
+	  BMC RAM region where the host LPC read/write region can be
+	  buffered.
+
+config ASPEED_LPC_SNOOP
+ tristate "Aspeed ast2500 HOST LPC snoop support"
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ help
+ Provides a driver to control the LPC snoop interface which
+ allows the BMC to listen on and save the data written by
+ the host to an arbitrary LPC I/O port.
+
+config ASPEED_P2A_CTRL
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+ help
+	  Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings
+	  through ioctl()s. The driver also provides an interface for
+	  userspace mappings to a pre-defined region.
+
+endmenu
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
new file mode 100644
index 000000000000..2f7b6da7be79
--- /dev/null
+++ b/drivers/soc/aspeed/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
diff --git a/drivers/misc/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index a024f8042259..a024f8042259 100644
--- a/drivers/misc/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 2feb4347d67f..2feb4347d67f 100644
--- a/drivers/misc/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
diff --git a/drivers/misc/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
index b60fbeaffcbd..b60fbeaffcbd 100644
--- a/drivers/misc/aspeed-p2a-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 506a6f3c2b9b..d6b529e06d9a 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
+obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 7d14a4b4e82a..d9231bd3c691 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -406,7 +406,6 @@ static int imx_gpc_probe(struct platform_device *pdev)
const struct imx_gpc_dt_data *of_id_data = of_id->data;
struct device_node *pgc_node;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
int ret;
@@ -417,8 +416,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
!pgc_node)
return 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -431,10 +429,19 @@ static int imx_gpc_probe(struct platform_device *pdev)
return ret;
}
- /* Disable PU power down in normal operation if ERR009619 is present */
+ /*
+ * Disable PU power down by runtime PM if ERR009619 is present.
+ *
+ * The PRE clock will be paused for several cycles when turning on the
+ * PU domain LDO from power down state. If PRE is in use at that time,
+ * the IPU/PRG cannot get the correct display data from the PRE.
+ *
+ * This is not a concern when the whole system enters suspend state, so
+ * it's safe to power down PU in this case.
+ */
if (of_id_data->err009619_present)
imx_gpc_domains[GPC_PGC_DOMAIN_PU].base.flags |=
- GENPD_FLAG_ALWAYS_ON;
+ GENPD_FLAG_RPM_ALWAYS_ON;
/* Keep DISP always on if ERR006287 is present */
if (of_id_data->err006287_present)
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 176f473127b6..31b8d002d855 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -136,8 +136,8 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ;
const bool enable_power_control = !on;
const bool has_regulator = !IS_ERR(domain->regulator);
- unsigned long deadline;
int i, ret = 0;
+ u32 pxx_req;
regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
domain->bits.map, domain->bits.map);
@@ -169,30 +169,19 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
- deadline = jiffies + msecs_to_jiffies(1);
- while (true) {
- u32 pxx_req;
-
- regmap_read(domain->regmap, offset, &pxx_req);
-
- if (!(pxx_req & domain->bits.pxx))
- break;
-
- if (time_after(jiffies, deadline)) {
- dev_err(domain->dev, "falied to command PGC\n");
- ret = -ETIMEDOUT;
- /*
- * If we were in a process of enabling a
- * domain and failed we might as well disable
- * the regulator we just enabled. And if it
- * was the opposite situation and we failed to
- * power down -- keep the regulator on
- */
- on = !on;
- break;
- }
-
- cpu_relax();
+ ret = regmap_read_poll_timeout(domain->regmap, offset, pxx_req,
+ !(pxx_req & domain->bits.pxx),
+ 0, USEC_PER_MSEC);
+ if (ret) {
+ dev_err(domain->dev, "failed to command PGC\n");
+ /*
+ * If we were in a process of enabling a
+ * domain and failed we might as well disable
+ * the regulator we just enabled. And if it
+ * was the opposite situation and we failed to
+ * power down -- keep the regulator on
+ */
+ on = !on;
}
if (enable_power_control)
@@ -574,7 +563,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *pgc_np, *np;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
int ret;
@@ -584,8 +572,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
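
regmap_read_poll_timeout(), used in the gpcv2 hunk above, replaces the open-coded jiffies loop: it keeps re-reading the register until the condition is true or the timeout expires, returning 0, -ETIMEDOUT, or a read error. A hedged, stand-alone sketch of the same pattern; the register offset and busy bit below are made up:

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_STATUS		0x04	/* hypothetical register */
#define EXAMPLE_STATUS_BUSY	BIT(0)	/* hypothetical busy flag */

static int example_wait_idle(struct regmap *map)
{
	u32 val;

	/* Poll with no delay between reads, give up after 1000 us (1 ms) */
	return regmap_read_poll_timeout(map, EXAMPLE_STATUS, val,
					!(val & EXAMPLE_STATUS_BUSY), 0, 1000);
}
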
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
new file mode 100644
index 000000000000..fc6429f9170a
--- /dev/null
+++ b/drivers/soc/imx/soc-imx8.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#define REV_B1 0x21
+
+#define IMX8MQ_SW_INFO_B1 0x40
+#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+
+struct imx8_soc_data {
+ char *name;
+ u32 (*soc_revision)(void);
+};
+
+static u32 __init imx8mq_soc_revision(void)
+{
+ struct device_node *np;
+ void __iomem *ocotp_base;
+ u32 magic;
+ u32 rev = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
+ if (!np)
+ goto out;
+
+ ocotp_base = of_iomap(np, 0);
+ WARN_ON(!ocotp_base);
+
+ magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
+ if (magic == IMX8MQ_SW_MAGIC_B1)
+ rev = REV_B1;
+
+ iounmap(ocotp_base);
+
+out:
+ of_node_put(np);
+ return rev;
+}
+
+static const struct imx8_soc_data imx8mq_soc_data = {
+ .name = "i.MX8MQ",
+ .soc_revision = imx8mq_soc_revision,
+};
+
+static const struct of_device_id imx8_soc_match[] = {
+ { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
+ { }
+};
+
+#define imx8_revision(soc_rev) \
+ soc_rev ? \
+ kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
+ "unknown"
+
+static int __init imx8_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ const struct of_device_id *id;
+ u32 soc_rev = 0;
+ const struct imx8_soc_data *data;
+ int ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ if (ret)
+ goto free_soc;
+
+ id = of_match_node(imx8_soc_match, root);
+ if (!id)
+ goto free_soc;
+
+ of_node_put(root);
+
+ data = id->data;
+ if (data) {
+ soc_dev_attr->soc_id = data->name;
+ if (data->soc_revision)
+ soc_rev = data->soc_revision();
+ }
+
+ soc_dev_attr->revision = imx8_revision(soc_rev);
+ if (!soc_dev_attr->revision)
+ goto free_soc;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ goto free_rev;
+
+ return 0;
+
+free_rev:
+	if (strcmp(soc_dev_attr->revision, "unknown"))
+		kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ of_node_put(root);
+ return -ENODEV;
+}
+device_initcall(imx8_soc_init);
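
The imx8_revision() macro above encodes the revision as two nibbles, major in bits 7:4 and minor in bits 3:0, so REV_B1 (0x21) is reported as "2.1" through the SoC device's revision attribute. A small hedged sketch of the same decoding outside the driver (function name made up):

#include <linux/printk.h>

/* Hedged sketch: decode an i.MX-style revision word */
static void example_print_rev(unsigned int soc_rev)
{
	if (soc_rev)
		pr_info("revision %u.%u\n", (soc_rev >> 4) & 0xf, soc_rev & 0xf);
	else
		pr_info("revision unknown\n");	/* e.g. OCOTP magic not found */
}
/* example_print_rev(0x21) logs "revision 2.1" */
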
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
new file mode 100644
index 000000000000..de6becdc78a2
--- /dev/null
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -0,0 +1,16 @@
+menu "IXP4xx SoC drivers"
+
+config IXP4XX_QMGR
+ tristate "IXP4xx Queue Manager support"
+ help
+ This driver supports IXP4xx built-in hardware queue manager
+ and is automatically selected by Ethernet and HSS drivers.
+
+config IXP4XX_NPE
+ tristate "IXP4xx Network Processor Engine support"
+ select FW_LOADER
+ help
+ This driver supports IXP4xx built-in network coprocessors
+ and is automatically selected by Ethernet and HSS drivers.
+
+endmenu
diff --git a/drivers/soc/ixp4xx/Makefile b/drivers/soc/ixp4xx/Makefile
new file mode 100644
index 000000000000..d20d99e6df65
--- /dev/null
+++ b/drivers/soc/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx-qmgr.o
+obj-$(CONFIG_IXP4XX_NPE) += ixp4xx-npe.o
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
new file mode 100644
index 000000000000..15979d4376ab
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -0,0 +1,762 @@
+/*
+ * Intel IXP4xx Network Processor Engine driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The code is based on publicly available information:
+ * - Intel IXP4xx Developer's Manual and other e-papers
+ * - Intel IXP400 Access Library Software (BSD license)
+ * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com>
+ * Thanks, Christian.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/npe.h>
+
+#define DEBUG_MSG 0
+#define DEBUG_FW 0
+
+#define NPE_COUNT 3
+#define MAX_RETRIES 1000 /* microseconds */
+#define NPE_42X_DATA_SIZE 0x800 /* in dwords */
+#define NPE_46X_DATA_SIZE 0x1000
+#define NPE_A_42X_INSTR_SIZE 0x1000
+#define NPE_B_AND_C_42X_INSTR_SIZE 0x800
+#define NPE_46X_INSTR_SIZE 0x1000
+#define REGS_SIZE 0x1000
+
+#define NPE_PHYS_REG 32
+
+#define FW_MAGIC 0xFEEDF00D
+#define FW_BLOCK_TYPE_INSTR 0x0
+#define FW_BLOCK_TYPE_DATA 0x1
+#define FW_BLOCK_TYPE_EOF 0xF
+
+/* NPE exec status (read) and command (write) */
+#define CMD_NPE_STEP 0x01
+#define CMD_NPE_START 0x02
+#define CMD_NPE_STOP 0x03
+#define CMD_NPE_CLR_PIPE 0x04
+#define CMD_CLR_PROFILE_CNT 0x0C
+#define CMD_RD_INS_MEM 0x10 /* instruction memory */
+#define CMD_WR_INS_MEM 0x11
+#define CMD_RD_DATA_MEM 0x12 /* data memory */
+#define CMD_WR_DATA_MEM 0x13
+#define CMD_RD_ECS_REG 0x14 /* exec access register */
+#define CMD_WR_ECS_REG 0x15
+
+#define STAT_RUN 0x80000000
+#define STAT_STOP 0x40000000
+#define STAT_CLEAR 0x20000000
+#define STAT_ECS_K 0x00800000 /* pipeline clean */
+
+#define NPE_STEVT 0x1B
+#define NPE_STARTPC 0x1C
+#define NPE_REGMAP 0x1E
+#define NPE_CINDEX 0x1F
+
+#define INSTR_WR_REG_SHORT 0x0000C000
+#define INSTR_WR_REG_BYTE 0x00004000
+#define INSTR_RD_FIFO 0x0F888220
+#define INSTR_RESET_MBOX 0x0FAC8210
+
+#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */
+#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */
+#define ECS_BG_CTXT_REG_2 0x02
+#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */
+#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */
+#define ECS_PRI_1_CTXT_REG_2 0x06
+#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */
+#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */
+#define ECS_PRI_2_CTXT_REG_2 0x0A
+#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */
+#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */
+#define ECS_DBG_CTXT_REG_2 0x0E
+#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */
+
+#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */
+#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */
+#define ECS_REG_0_LDUR_BITS 8
+#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */
+#define ECS_REG_1_CCTXT_BITS 16
+#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */
+#define ECS_REG_1_SELCTXT_BITS 0
+#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */
+#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */
+#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */
+
+/* NPE watchpoint_fifo register bit */
+#define WFIFO_VALID 0x80000000
+
+/* NPE messaging_status register bit definitions */
+#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */
+#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */
+#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */
+#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */
+#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */
+#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */
+#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */
+#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */
+
+/* NPE messaging_control register bit definitions */
+#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */
+#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */
+#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */
+#define MSGCTL_IN_FIFO_WRITE 0x02000000
+
+/* NPE mailbox_status value for reset */
+#define RESET_MBOX_STAT 0x0000F0F0
+
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
+
+#define print_npe(pri, npe, fmt, ...) \
+ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
+
+#if DEBUG_MSG
+#define debug_msg(npe, fmt, ...) \
+ print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__)
+#else
+#define debug_msg(npe, fmt, ...)
+#endif
+
+static struct {
+ u32 reg, val;
+} ecs_reset[] = {
+ { ECS_BG_CTXT_REG_0, 0xA0000000 },
+ { ECS_BG_CTXT_REG_1, 0x01000000 },
+ { ECS_BG_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_1_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_1_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_1_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_2_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_2_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_2_CTXT_REG_2, 0x00008000 },
+ { ECS_DBG_CTXT_REG_0, 0x20000000 },
+ { ECS_DBG_CTXT_REG_1, 0x00000000 },
+ { ECS_DBG_CTXT_REG_2, 0x001E0000 },
+ { ECS_INSTRUCT_REG, 0x1003C00F },
+};
+
+static struct npe npe_tab[NPE_COUNT] = {
+ {
+ .id = 0,
+ }, {
+ .id = 1,
+ }, {
+ .id = 2,
+ }
+};
+
+int npe_running(struct npe *npe)
+{
+ return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0;
+}
+
+static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data)
+{
+ __raw_writel(data, &npe->regs->exec_data);
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+}
+
+static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd)
+{
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+	/* Introduce extra read cycles after issuing the read command to the NPE
+	   so that we read the register after the NPE has updated it.
+	   This is to overcome a race condition between the XScale and the NPE */
+ __raw_readl(&npe->regs->exec_data);
+ __raw_readl(&npe->regs->exec_data);
+ return __raw_readl(&npe->regs->exec_data);
+}
+
+static void npe_clear_active(struct npe *npe, u32 reg)
+{
+ u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE);
+}
+
+static void npe_start(struct npe *npe)
+{
+ /* ensure only Background Context Stack Level is active */
+ npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0);
+ npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0);
+ npe_clear_active(npe, ECS_DBG_CTXT_REG_0);
+
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd);
+}
+
+static void npe_stop(struct npe *npe)
+{
+ __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/
+}
+
+static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx,
+ u32 ldur)
+{
+ u32 wc;
+ int i;
+
+ /* set the Active bit, and the LDUR, in the debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG,
+ ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS));
+
+ /* set CCTXT at ECS DEBUG L3 to specify in which context to execute
+ the instruction, and set SELCTXT at ECS DEBUG Level to specify
+ which context store to access.
+ Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
+ */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG,
+ (ctx << ECS_REG_1_CCTXT_BITS) |
+ (ctx << ECS_REG_1_SELCTXT_BITS));
+
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+
+ /* load NPE instruction into the instruction register */
+ npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr);
+
+ /* we need this value later to wait for completion of NPE execution
+ step */
+ wc = __raw_readl(&npe->regs->watch_count);
+
+ /* issue a Step One command via the Execution Control register */
+ __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd);
+
+ /* Watch Count register increments when NPE completes an instruction */
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (wc != __raw_readl(&npe->regs->watch_count))
+ return 0;
+ udelay(1);
+ }
+
+ print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr,
+ u8 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov8 d0, #0 */
+ u32 instr = INSTR_WR_REG_BYTE | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr,
+ u16 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov16 d0, #0 */
+ u32 instr = INSTR_WR_REG_SHORT | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr,
+ u32 val, u32 ctx)
+{
+ /* write in 16 bit steps first the high and then the low value */
+ if (npe_logical_reg_write16(npe, addr, val >> 16, ctx))
+ return -ETIMEDOUT;
+ return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx);
+}
+
+static int npe_reset(struct npe *npe)
+{
+ u32 val, ctl, exec_count, ctx_reg2;
+ int i;
+
+ ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) &
+ 0x3F3FFFFF;
+
+ /* disable parity interrupt */
+ __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control);
+
+ /* pre exec - debug instruction */
+ /* turn off the halt bit by clearing Execution Count register. */
+ exec_count = __raw_readl(&npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->exec_count);
+ /* ensure that IF and IE are on (temporarily), so that we don't end up
+ stepping forever */
+ ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 |
+ ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE);
+
+ /* clear the FIFOs */
+ while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID)
+ ;
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE)
+ /* read from the outFIFO until empty */
+ print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n",
+ __raw_readl(&npe->regs->in_out_fifo));
+
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)
+		/* step execution of the NPE instruction to read inFIFO using
+ the Debug Executing Context stack */
+ if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0))
+ return -ETIMEDOUT;
+
+ /* reset the mailbox reg from the XScale side */
+ __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status);
+ /* from NPE side */
+ if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0))
+ return -ETIMEDOUT;
+
+ /* Reset the physical registers in the NPE register file */
+ for (val = 0; val < NPE_PHYS_REG; val++) {
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0))
+ return -ETIMEDOUT;
+ /* address is either 0 or 4 */
+ if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0))
+ return -ETIMEDOUT;
+ }
+
+ /* Reset the context store = each context's Context Store registers */
+
+ /* Context 0 has no STARTPC. Instead, this value is used to set NextPC
+ for Background ECS, to set where NPE starts executing code */
+ val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG);
+ val &= ~ECS_REG_0_NEXTPC_MASK;
+ val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK;
+ npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val);
+
+ for (i = 0; i < 16; i++) {
+ if (i) { /* Context 0 has no STEVT nor STARTPC */
+ /* STEVT = off, 0x80 */
+ if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i))
+ return -ETIMEDOUT;
+ }
+ /* REGMAP = d0->p0, d8->p2, d16->p4 */
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i))
+ return -ETIMEDOUT;
+ }
+
+ /* post exec */
+ /* clear active bit in debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0);
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ /* restore previous values */
+ __raw_writel(exec_count, &npe->regs->exec_count);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2);
+
+ /* write reset values to Execution Context Stack registers */
+ for (val = 0; val < ARRAY_SIZE(ecs_reset); val++)
+ npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG,
+ ecs_reset[val].val);
+
+ /* clear the profile counter */
+ __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd);
+
+ __raw_writel(0, &npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->action_points[0]);
+ __raw_writel(0, &npe->regs->action_points[1]);
+ __raw_writel(0, &npe->regs->action_points[2]);
+ __raw_writel(0, &npe->regs->action_points[3]);
+ __raw_writel(0, &npe->regs->watch_count);
+
+ val = ixp4xx_read_feature_bits();
+ /* reset the NPE */
+ ixp4xx_write_feature_bits(val &
+ ~(IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ /* deassert reset */
+ ixp4xx_write_feature_bits(val |
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id))
+ break; /* NPE is back alive */
+ udelay(1);
+ }
+ if (i == MAX_RETRIES)
+ return -ETIMEDOUT;
+
+ npe_stop(npe);
+
+ /* restore NPE configuration bus Control Register - parity settings */
+ __raw_writel(ctl, &npe->regs->messaging_control);
+ return 0;
+}
+
+
+int npe_send_message(struct npe *npe, const void *msg, const char *what)
+{
+ const u32 *send = msg;
+ int cycles = 0;
+
+ debug_msg(npe, "Trying to send message %s [%08X:%08X]\n",
+ what, send[0], send[1]);
+
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) {
+ debug_msg(npe, "NPE input FIFO not empty\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[0], &npe->regs->in_out_fifo);
+
+ if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) {
+ debug_msg(npe, "NPE input FIFO full\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[1], &npe->regs->in_out_fifo);
+
+ while ((cycles < MAX_RETRIES) &&
+ (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout sending message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ u32 *recv = msg;
+ int cycles = 0, cnt = 0;
+
+ debug_msg(npe, "Trying to receive message %s\n", what);
+
+ while (cycles < MAX_RETRIES) {
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) {
+ recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo);
+ if (cnt == 2)
+ break;
+ } else {
+ udelay(1);
+ cycles++;
+ }
+ }
+
+ switch(cnt) {
+ case 1:
+ debug_msg(npe, "Received [%08X]\n", recv[0]);
+ break;
+ case 2:
+ debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]);
+ break;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout waiting for message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_send_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ int result;
+ u32 *send = msg, recv[2];
+
+ if ((result = npe_send_message(npe, msg, what)) != 0)
+ return result;
+ if ((result = npe_recv_message(npe, recv, what)) != 0)
+ return result;
+
+ if ((recv[0] != send[0]) || (recv[1] != send[1])) {
+ debug_msg(npe, "Message %s: unexpected message received\n",
+ what);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
+{
+ const struct firmware *fw_entry;
+
+ struct dl_block {
+ u32 type;
+ u32 offset;
+ } *blk;
+
+ struct dl_image {
+ u32 magic;
+ u32 id;
+ u32 size;
+ union {
+ u32 data[0];
+ struct dl_block blocks[0];
+ };
+ } *image;
+
+ struct dl_codeblock {
+ u32 npe_addr;
+ u32 size;
+ u32 data[0];
+ } *cb;
+
+ int i, j, err, data_size, instr_size, blocks, table_end;
+ u32 cmd;
+
+ if ((err = request_firmware(&fw_entry, name, dev)) != 0)
+ return err;
+
+ err = -EINVAL;
+ if (fw_entry->size < sizeof(struct dl_image)) {
+ print_npe(KERN_ERR, npe, "incomplete firmware file\n");
+ goto err;
+ }
+ image = (struct dl_image*)fw_entry->data;
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n",
+ image->magic, image->id, image->size, image->size * 4);
+#endif
+
+ if (image->magic == swab32(FW_MAGIC)) { /* swapped file */
+ image->id = swab32(image->id);
+ image->size = swab32(image->size);
+ } else if (image->magic != FW_MAGIC) {
+ print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n",
+ image->magic);
+ goto err;
+ }
+ if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) {
+ print_npe(KERN_ERR, npe,
+ "inconsistent size of firmware file\n");
+ goto err;
+ }
+ if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) {
+ print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n");
+ goto err;
+ }
+ if (image->magic == swab32(FW_MAGIC))
+ for (i = 0; i < image->size; i++)
+ image->data[i] = swab32(image->data[i]);
+
+ if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) {
+ print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on "
+ "IXP42x\n");
+ goto err;
+ }
+
+ if (npe_running(npe)) {
+ print_npe(KERN_INFO, npe, "unable to load firmware, NPE is "
+ "already running\n");
+ err = -EBUSY;
+ goto err;
+ }
+#if 0
+ npe_stop(npe);
+ npe_reset(npe);
+#endif
+
+ print_npe(KERN_INFO, npe, "firmware functionality 0x%X, "
+ "revision 0x%X:%X\n", (image->id >> 16) & 0xFF,
+ (image->id >> 8) & 0xFF, image->id & 0xFF);
+
+ if (cpu_is_ixp42x()) {
+ if (!npe->id)
+ instr_size = NPE_A_42X_INSTR_SIZE;
+ else
+ instr_size = NPE_B_AND_C_42X_INSTR_SIZE;
+ data_size = NPE_42X_DATA_SIZE;
+ } else {
+ instr_size = NPE_46X_INSTR_SIZE;
+ data_size = NPE_46X_DATA_SIZE;
+ }
+
+ for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;
+ blocks++)
+ if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)
+ break;
+ if (blocks * sizeof(struct dl_block) / 4 >= image->size) {
+ print_npe(KERN_INFO, npe, "firmware EOF block marker not "
+ "found\n");
+ goto err;
+ }
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);
+#endif
+
+ table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;
+ for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {
+ if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4
+ || blk->offset < table_end) {
+ print_npe(KERN_INFO, npe, "invalid offset 0x%X of "
+ "firmware block #%i\n", blk->offset, i);
+ goto err;
+ }
+
+ cb = (struct dl_codeblock*)&image->data[blk->offset];
+ if (blk->type == FW_BLOCK_TYPE_INSTR) {
+ if (cb->npe_addr + cb->size > instr_size)
+ goto too_big;
+ cmd = CMD_WR_INS_MEM;
+ } else if (blk->type == FW_BLOCK_TYPE_DATA) {
+ if (cb->npe_addr + cb->size > data_size)
+ goto too_big;
+ cmd = CMD_WR_DATA_MEM;
+ } else {
+ print_npe(KERN_INFO, npe, "invalid firmware block #%i "
+ "type 0x%X\n", i, blk->type);
+ goto err;
+ }
+ if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't "
+ "fit in firmware image: type %c, start 0x%X,"
+ " length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+ goto err;
+ }
+
+ for (j = 0; j < cb->size; j++)
+ npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
+ }
+
+ npe_start(npe);
+ if (!npe_running(npe))
+ print_npe(KERN_ERR, npe, "unable to start\n");
+ release_firmware(fw_entry);
+ return 0;
+
+too_big:
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE "
+ "memory: type %c, start 0x%X, length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+err:
+ release_firmware(fw_entry);
+ return err;
+}
+
+
+struct npe *npe_request(unsigned id)
+{
+ if (id < NPE_COUNT)
+ if (npe_tab[id].valid)
+ if (try_module_get(THIS_MODULE))
+ return &npe_tab[id];
+ return NULL;
+}
+
+void npe_release(struct npe *npe)
+{
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_npe_probe(struct platform_device *pdev)
+{
+ int i, found = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ for (i = 0; i < NPE_COUNT; i++) {
+ struct npe *npe = &npe_tab[i];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ return -ENODEV;
+
+ if (!(ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << i))) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
+ i, res->start, res->end);
+ continue; /* NPE already disabled or not present */
+ }
+		npe->regs = devm_ioremap_resource(dev, res);
+		if (IS_ERR(npe->regs))
+			return PTR_ERR(npe->regs);
+
+ if (npe_reset(npe)) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
+ i, res->start, res->end);
+ continue;
+ }
+ npe->valid = 1;
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
+ i, res->start, res->end);
+ found++;
+ }
+
+ if (!found)
+ return -ENODEV;
+ return 0;
+}
+
+static int ixp4xx_npe_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < NPE_COUNT; i++)
+ if (npe_tab[i].regs) {
+ npe_reset(&npe_tab[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_npe_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-network-processing-engine",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_npe_driver = {
+ .driver = {
+ .name = "ixp4xx-npe",
+ .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+ },
+ .probe = ixp4xx_npe_probe,
+ .remove = ixp4xx_npe_remove,
+};
+module_platform_driver(ixp4xx_npe_driver);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
+
+EXPORT_SYMBOL(npe_names);
+EXPORT_SYMBOL(npe_running);
+EXPORT_SYMBOL(npe_request);
+EXPORT_SYMBOL(npe_release);
+EXPORT_SYMBOL(npe_load_firmware);
+EXPORT_SYMBOL(npe_send_message);
+EXPORT_SYMBOL(npe_recv_message);
+EXPORT_SYMBOL(npe_send_recv_message);
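
Taken together, the exported symbols above form a small request / load-firmware / message API for NPE clients such as the IXP4xx Ethernet and HSS drivers. A hedged sketch of the intended call sequence; the NPE id, the message contents and the npe_name() helper from the npe.h header are assumptions of this example:

#include <linux/device.h>
#include <linux/soc/ixp4xx/npe.h>

static int example_npe_client(struct device *dev)
{
	struct npe *npe;
	u32 msg[2] = { 0x12345678, 0 };	/* firmware-defined command words */
	int err;

	npe = npe_request(1);		/* NPE-B */
	if (!npe)
		return -ENODEV;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), dev);
		if (err)
			goto release;
	}

	/* Most NPE commands are echoed back verbatim on success */
	err = npe_send_recv_message(npe, msg, "example command");

release:
	npe_release(npe);
	return err;
}
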
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
new file mode 100644
index 000000000000..13a8a13c9b01
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -0,0 +1,488 @@
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+static struct qmgr_regs __iomem *qmgr_regs;
+static int qmgr_irq_1;
+static int qmgr_irq_2;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
+
+#if DEBUG_QMGR
+char qmgr_queue_descs[QUEUES][32];
+#endif
+
+void qmgr_put_entry(unsigned int queue, u32 val)
+{
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) put %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ __raw_writel(val, &qmgr_regs->acc[queue][0]);
+}
+
+u32 qmgr_get_entry(unsigned int queue)
+{
+ u32 val;
+ val = __raw_readl(&qmgr_regs->acc[queue][0]);
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) get %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ return val;
+}
+
+static int __qmgr_get_stat1(unsigned int queue)
+{
+ return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
+ >> ((queue & 7) << 2)) & 0xF;
+}
+
+static int __qmgr_get_stat2(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
+ >> ((queue & 0xF) << 1)) & 0x3;
+}
+
+/**
+ * qmgr_stat_empty() - checks if a hardware queue is empty
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is empty.
+ */
+int qmgr_stat_empty(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
+}
+
+/**
+ * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is below low watermark.
+ */
+int qmgr_stat_below_low_watermark(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statne_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
+}
+
+/**
+ * qmgr_stat_full() - checks if a hardware queue is full
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is full.
+ */
+int qmgr_stat_full(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statf_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
+}
+
+/**
+ * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue experienced overflow.
+ */
+int qmgr_stat_overflow(unsigned int queue)
+{
+ return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
+}
+
+void qmgr_set_irq(unsigned int queue, int src,
+ void (*handler)(void *pdev), void *pdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ if (queue < HALF_QUEUES) {
+ u32 __iomem *reg;
+ int bit;
+ BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+ reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+ bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+ __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+ reg);
+ } else
+ /* IRQ source for queues 32-63 is fixed */
+ BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
+ irq_handlers[queue] = handler;
+ irq_pdevs[queue] = pdev;
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 en_bitmap, src, stat;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+ en_bitmap = qmgr_regs->irqen[0];
+ while (en_bitmap) {
+ i = __fls(en_bitmap); /* number of the last "low" queue */
+ en_bitmap &= ~BIT(i);
+ src = qmgr_regs->irqsrc[i >> 3];
+ stat = qmgr_regs->stat1[i >> 3];
+ if (src & 4) /* the IRQ condition is inverted */
+ stat = ~stat;
+ if (stat & BIT(src & 3)) {
+ irq_handlers[i](irq_pdevs[i]);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 req_bitmap;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+ req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last "high" queue */
+ req_bitmap &= ~BIT(i);
+ irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq(int irq, void *pdev)
+{
+ int i, half = (irq == qmgr_irq_1 ? 0 : 1);
+ u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
+
+ if (!req_bitmap)
+ return 0;
+ __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
+
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last queue */
+ req_bitmap &= ~BIT(i);
+ i += half * HALF_QUEUES;
+ irq_handlers[i](irq_pdevs[i]);
+ }
+ return IRQ_HANDLED;
+}
+
+
+void qmgr_enable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+ &qmgr_regs->irqen[half]);
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+ &qmgr_regs->irqen[half]);
+ __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+static inline void shift_mask(u32 *mask)
+{
+ mask[3] = mask[3] << 1 | mask[2] >> 31;
+ mask[2] = mask[2] << 1 | mask[1] >> 31;
+ mask[1] = mask[1] << 1 | mask[0] >> 31;
+ mask[0] <<= 1;
+}
+
+#if DEBUG_QMGR
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark,
+ const char *desc_format, const char* name)
+#else
+int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark)
+#endif
+{
+ u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+ int err;
+
+ BUG_ON(queue >= QUEUES);
+
+ if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+ return -EINVAL;
+
+ switch (len) {
+ case 16:
+ cfg = 0 << 24;
+ mask[0] = 0x1;
+ break;
+ case 32:
+ cfg = 1 << 24;
+ mask[0] = 0x3;
+ break;
+ case 64:
+ cfg = 2 << 24;
+ mask[0] = 0xF;
+ break;
+ case 128:
+ cfg = 3 << 24;
+ mask[0] = 0xFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cfg |= nearly_empty_watermark << 26;
+ cfg |= nearly_full_watermark << 29;
+ len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
+ mask[1] = mask[2] = mask[3] = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ spin_lock_irq(&qmgr_lock);
+ if (__raw_readl(&qmgr_regs->sram[queue])) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ while (1) {
+ if (!(used_sram_bitmap[0] & mask[0]) &&
+ !(used_sram_bitmap[1] & mask[1]) &&
+ !(used_sram_bitmap[2] & mask[2]) &&
+ !(used_sram_bitmap[3] & mask[3]))
+ break; /* found free space */
+
+ addr++;
+ shift_mask(mask);
+ if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+ printk(KERN_ERR "qmgr: no free SRAM space for"
+ " queue %i\n", queue);
+ err = -ENOMEM;
+ goto err;
+ }
+ }
+
+ used_sram_bitmap[0] |= mask[0];
+ used_sram_bitmap[1] |= mask[1];
+ used_sram_bitmap[2] |= mask[2];
+ used_sram_bitmap[3] |= mask[3];
+ __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+#if DEBUG_QMGR
+ snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
+ desc_format, name);
+ printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
+ qmgr_queue_descs[queue], queue, addr);
+#endif
+ spin_unlock_irq(&qmgr_lock);
+ return 0;
+
+err:
+ spin_unlock_irq(&qmgr_lock);
+ module_put(THIS_MODULE);
+ return err;
+}
+
+void qmgr_release_queue(unsigned int queue)
+{
+ u32 cfg, addr, mask[4];
+
+ BUG_ON(queue >= QUEUES); /* not in valid range */
+
+ spin_lock_irq(&qmgr_lock);
+ cfg = __raw_readl(&qmgr_regs->sram[queue]);
+ addr = (cfg >> 14) & 0xFF;
+
+ BUG_ON(!addr); /* not requested */
+
+ switch ((cfg >> 24) & 3) {
+ case 0: mask[0] = 0x1; break;
+ case 1: mask[0] = 0x3; break;
+ case 2: mask[0] = 0xF; break;
+ case 3: mask[0] = 0xFF; break;
+ }
+
+ mask[1] = mask[2] = mask[3] = 0;
+
+ while (addr--)
+ shift_mask(mask);
+
+#if DEBUG_QMGR
+ printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
+ qmgr_queue_descs[queue], queue);
+ qmgr_queue_descs[queue][0] = '\x0';
+#endif
+
+ while ((addr = qmgr_get_entry(queue)))
+ printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+ queue, addr);
+
+ __raw_writel(0, &qmgr_regs->sram[queue]);
+
+ used_sram_bitmap[0] &= ~mask[0];
+ used_sram_bitmap[1] &= ~mask[1];
+ used_sram_bitmap[2] &= ~mask[2];
+ used_sram_bitmap[3] &= ~mask[3];
+ irq_handlers[queue] = NULL; /* catch IRQ bugs */
+ spin_unlock_irq(&qmgr_lock);
+
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_qmgr_probe(struct platform_device *pdev)
+{
+ int i, err;
+ irq_handler_t handler1, handler2;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq1, irq2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+	qmgr_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qmgr_regs))
+		return PTR_ERR(qmgr_regs);
+
+ irq1 = platform_get_irq(pdev, 0);
+ if (irq1 <= 0)
+ return irq1 ? irq1 : -EINVAL;
+ qmgr_irq_1 = irq1;
+ irq2 = platform_get_irq(pdev, 1);
+ if (irq2 <= 0)
+ return irq2 ? irq2 : -EINVAL;
+ qmgr_irq_2 = irq2;
+
+ /* reset qmgr registers */
+ for (i = 0; i < 4; i++) {
+ __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
+ __raw_writel(0, &qmgr_regs->irqsrc[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ __raw_writel(0, &qmgr_regs->stat2[i]);
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
+ __raw_writel(0, &qmgr_regs->irqen[i]);
+ }
+
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+ __raw_writel(0, &qmgr_regs->statf_h);
+
+ for (i = 0; i < QUEUES; i++)
+ __raw_writel(0, &qmgr_regs->sram[i]);
+
+ if (cpu_is_ixp42x_rev_a0()) {
+ handler1 = qmgr_irq1_a0;
+ handler2 = qmgr_irq2_a0;
+ } else
+ handler1 = handler2 = qmgr_irq;
+
+ err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq1, err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq2, err);
+ return err;
+ }
+
+ used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
+ spin_lock_init(&qmgr_lock);
+
+ dev_info(dev, "IXP4xx Queue Manager initialized.\n");
+ return 0;
+}
+
+static int ixp4xx_qmgr_remove(struct platform_device *pdev)
+{
+ synchronize_irq(qmgr_irq_1);
+ synchronize_irq(qmgr_irq_2);
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_qmgr_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-ahb-queue-manager",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_qmgr_driver = {
+ .driver = {
+ .name = "ixp4xx-qmgr",
+ .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+ },
+ .probe = ixp4xx_qmgr_probe,
+ .remove = ixp4xx_qmgr_remove,
+};
+module_platform_driver(ixp4xx_qmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Krzysztof Halasa");
+
+EXPORT_SYMBOL(qmgr_put_entry);
+EXPORT_SYMBOL(qmgr_get_entry);
+EXPORT_SYMBOL(qmgr_stat_empty);
+EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
+EXPORT_SYMBOL(qmgr_stat_full);
+EXPORT_SYMBOL(qmgr_stat_overflow);
+EXPORT_SYMBOL(qmgr_set_irq);
+EXPORT_SYMBOL(qmgr_enable_irq);
+EXPORT_SYMBOL(qmgr_disable_irq);
+#if DEBUG_QMGR
+EXPORT_SYMBOL(qmgr_queue_descs);
+EXPORT_SYMBOL(qmgr_request_queue);
+#else
+EXPORT_SYMBOL(__qmgr_request_queue);
+#endif
+EXPORT_SYMBOL(qmgr_release_queue);
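
For context, a hedged sketch of how a client of the queue manager is expected to tie these exports together: request a queue, hook an IRQ source, then drain entries from the handler. The queue number, length and watermarks are made up, and the sketch assumes the qmgr.h wrapper that maps qmgr_request_queue() onto __qmgr_request_queue() in non-debug builds:

#include <linux/kernel.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define EXAMPLE_QUEUE	31	/* hypothetical queue number */

static void example_rx_handler(void *pdev)
{
	while (!qmgr_stat_empty(EXAMPLE_QUEUE))
		pr_info("entry: 0x%08x\n", qmgr_get_entry(EXAMPLE_QUEUE));
}

static int example_queue_setup(void *pdev)
{
	int err;

	/* 32-dword queue, nearly-empty/nearly-full watermarks of 0 */
	err = qmgr_request_queue(EXAMPLE_QUEUE, 32, 0, 0, "example %s", "rx");
	if (err)
		return err;

	qmgr_set_irq(EXAMPLE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
		     example_rx_handler, pdev);
	qmgr_enable_irq(EXAMPLE_QUEUE);
	return 0;
}
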
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 8236a6c87e19..c4449a163991 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -381,6 +381,10 @@ enum pwrap_regs {
PWRAP_EXT_GPS_AUXADC_RDATA_ADDR,
PWRAP_GPSINF_0_STA,
PWRAP_GPSINF_1_STA,
+
+ /* MT8516 only regs */
+ PWRAP_OP_TYPE,
+ PWRAP_MSB_FIRST,
};
static int mt2701_regs[] = {
@@ -852,6 +856,91 @@ static int mt8183_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
+static int mt8516_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+ [PWRAP_SW_RST] = 0x168,
+ [PWRAP_OP_TYPE] = 0x16c,
+ [PWRAP_MSB_FIRST] = 0x170,
+};
+
enum pmic_type {
PMIC_MT6323,
PMIC_MT6351,
@@ -869,6 +958,7 @@ enum pwrap_type {
PWRAP_MT8135,
PWRAP_MT8173,
PWRAP_MT8183,
+ PWRAP_MT8516,
};
struct pmic_wrapper;
@@ -1281,7 +1371,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
static int pwrap_init_cipher(struct pmic_wrapper *wrp)
{
int ret;
- u32 rdata;
+ u32 rdata = 0;
pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
@@ -1297,6 +1387,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
case PWRAP_MT6765:
case PWRAP_MT6797:
case PWRAP_MT8173:
+ case PWRAP_MT8516:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
break;
case PWRAP_MT7622:
@@ -1478,7 +1569,8 @@ static int pwrap_init(struct pmic_wrapper *wrp)
{
int ret;
- reset_control_reset(wrp->rstc);
+ if (wrp->rstc)
+ reset_control_reset(wrp->rstc);
if (wrp->rstc_bridge)
reset_control_reset(wrp->rstc_bridge);
@@ -1764,6 +1856,18 @@ static const struct pmic_wrapper_type pwrap_mt8183 = {
.init_soc_specific = pwrap_mt8183_init_soc_specific,
};
+static struct pmic_wrapper_type pwrap_mt8516 = {
+ .regs = mt8516_regs,
+ .type = PWRAP_MT8516,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_mt2701_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct of_device_id of_pwrap_match_tbl[] = {
{
.compatible = "mediatek,mt2701-pwrap",
@@ -1787,6 +1891,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt8183-pwrap",
.data = &pwrap_mt8183,
}, {
+ .compatible = "mediatek,mt8516-pwrap",
+ .data = &pwrap_mt8516,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index c701b3b010f1..f6c3d17b05c7 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -248,8 +248,8 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
}
cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
- if (IS_ERR_OR_NULL(cmd_db_header)) {
- ret = PTR_ERR(cmd_db_header);
+ if (!cmd_db_header) {
+ ret = -ENOMEM;
cmd_db_header = NULL;
return ret;
}
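
The change above reflects that memremap() reports failure with NULL, not an ERR_PTR(), so the old IS_ERR_OR_NULL()/PTR_ERR() combination could turn a failed mapping into ret = 0. A minimal hedged sketch of the expected convention:

#include <linux/io.h>
#include <linux/ioport.h>

/* Hedged sketch: map a reserved-memory region described by a struct resource */
static void *example_map(struct resource *res)
{
	/* memremap() returns NULL on failure, never an ERR_PTR() */
	return memremap(res->start, resource_size(res), MEMREMAP_WB);
}

Callers then translate a NULL return into -ENOMEM, as the probe above now does.
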
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index c239a28e503f..f9e309f0acd3 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -345,8 +345,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
struct qmi_handle *qmi = txn->qmi;
int ret;
- ret = wait_for_completion_interruptible_timeout(&txn->completion,
- timeout);
+ ret = wait_for_completion_timeout(&txn->completion, timeout);
mutex_lock(&qmi->txn_lock);
mutex_lock(&txn->lock);
@@ -354,9 +353,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
- if (ret < 0)
- return ret;
- else if (ret == 0)
+ if (ret == 0)
return -ETIMEDOUT;
else
return txn->result;
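
Unlike the interruptible variant, wait_for_completion_timeout() never returns a negative value: it returns 0 when the timeout expires and the number of jiffies left otherwise, which is why the ret < 0 branch could be dropped above. A hedged sketch of the usual pattern (function name made up):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hedged sketch: wait up to one second for 'done' and map the result to an errno */
static int example_wait(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	if (!left)
		return -ETIMEDOUT;	/* 0 means the wait timed out */

	return 0;			/* > 0 means it completed with 'left' jiffies to spare */
}
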
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 7200d762a951..6f5e8be9689c 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -137,6 +137,26 @@ static struct class rmtfs_class = {
.name = "rmtfs",
};
+static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data;
+
+ if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
+ dev_dbg(&rmtfs_mem->dev,
+ "vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
+ vma->vm_end, vma->vm_start,
+ (vma->vm_end - vma->vm_start), &rmtfs_mem->size);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ rmtfs_mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
static const struct file_operations qcom_rmtfs_mem_fops = {
.owner = THIS_MODULE,
.open = qcom_rmtfs_mem_open,
@@ -144,6 +164,7 @@ static const struct file_operations qcom_rmtfs_mem_fops = {
.write = qcom_rmtfs_mem_write,
.release = qcom_rmtfs_mem_release,
.llseek = default_llseek,
+ .mmap = qcom_rmtfs_mem_mmap,
};
static void qcom_rmtfs_mem_release_device(struct device *dev)
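
With the new .mmap handler, userspace can map the rmtfs shared-memory region directly instead of shuttling data through read()/write(). A hedged userspace sketch; the device node path is an assumption of this example and depends on how the rmtfs character device is named on the running system:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical node name; check /dev for the actual rmtfs device */
	int fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;

	/* The requested length must not exceed the region size enforced by the driver */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... access the shared memory through p ... */

	munmap(p, 4096);
	close(fd);
	return 0;
}
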
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 75bd9a83aef0..e278fc11fe5c 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -459,7 +459,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
do {
slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
i, msg->num_cmds, 0);
- if (slot == tcs->num_tcs * tcs->ncpt)
+ if (slot >= tcs->num_tcs * tcs->ncpt)
return -ENOMEM;
i += tcs->ncpt;
} while (slot + msg->num_cmds - 1 >= i);
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 4af96e668a2f..3299cf5365f3 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -335,6 +335,9 @@ static int __init renesas_soc_init(void)
/* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
if ((product & 0x7fff) == 0x5210)
product ^= 0x11;
+ /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
+ if ((product & 0x7fff) == 0x5211)
+ product ^= 0x12;
if (soc->id && ((product >> 8) & 0xff) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
return -ENODEV;
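
The new quirk works because the low byte of the product register encodes the ES revision with (major - 1) in the upper nibble and the minor in the lower nibble: 0x11 decodes as ES2.1, and 0x11 ^ 0x12 = 0x03, which decodes as ES1.3. A tiny hedged sketch of the arithmetic (function name made up):

/* Hedged sketch: remap a misreported R-Car M3-W ES2.1 product value to ES1.3 */
static inline u32 example_fixup_es13(u32 product)
{
	if ((product & 0x7fff) == 0x5211)	/* reads back as ES2.1 */
		product ^= 0x12;		/* low byte 0x11 -> 0x03, i.e. ES1.3 */
	return product;
}
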
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 96882ffde67e..3b81e1d75a97 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
};
#define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_GRF_SOC_CON2 0x24c
static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
{ "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
+ { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
};
static const struct rockchip_grf_info rk3288_grf __initconst = {
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 0df258518693..5648e5c09ef5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -272,6 +272,14 @@ static const char * const tegra30_reset_sources[] = {
"WATCHDOG",
"SENSOR",
"SW_MAIN",
+ "LP0"
+};
+
+static const char * const tegra210_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
"LP0",
"AOTAG"
};
@@ -656,10 +664,15 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
int err;
err = tegra_powergate_power_up(pg, true);
- if (err)
+ if (err) {
dev_err(dev, "failed to turn on PM domain %s: %d\n",
pg->genpd.name, err);
+ goto out;
+ }
+
+ reset_control_release(pg->reset);
+out:
return err;
}
@@ -669,10 +682,18 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
struct device *dev = pg->pmc->dev;
int err;
+ err = reset_control_acquire(pg->reset);
+ if (err < 0) {
+ pr_err("failed to acquire resets: %d\n", err);
+ return err;
+ }
+
err = tegra_powergate_power_down(pg);
- if (err)
+ if (err) {
dev_err(dev, "failed to turn off PM domain %s: %d\n",
pg->genpd.name, err);
+ reset_control_release(pg->reset);
+ }
return err;
}
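
The reset_control_acquire()/reset_control_release() calls above implement the "acquired resets" protocol: the reset array is obtained in released state, temporarily acquired for exclusive use around a power-gate transition, and released again so another consumer can drive the same lines. A hedged sketch of the pattern in isolation (function name made up):

#include <linux/reset.h>

static int example_power_transition(struct reset_control *rst)
{
	int err;

	err = reset_control_acquire(rst);	/* take temporary exclusive control */
	if (err < 0)
		return err;

	err = reset_control_assert(rst);
	if (!err)
		err = reset_control_deassert(rst);

	reset_control_release(rst);		/* hand control back to other users */
	return err;
}
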
@@ -937,38 +958,53 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
struct device *dev = pg->pmc->dev;
int err;
- pg->reset = of_reset_control_array_get_exclusive(np);
+ pg->reset = of_reset_control_array_get_exclusive_released(np);
if (IS_ERR(pg->reset)) {
err = PTR_ERR(pg->reset);
dev_err(dev, "failed to get device resets: %d\n", err);
return err;
}
- if (off)
+ err = reset_control_acquire(pg->reset);
+ if (err < 0) {
+ pr_err("failed to acquire resets: %d\n", err);
+ goto out;
+ }
+
+ if (off) {
err = reset_control_assert(pg->reset);
- else
+ } else {
err = reset_control_deassert(pg->reset);
+ if (err < 0)
+ goto out;
- if (err)
+ reset_control_release(pg->reset);
+ }
+
+out:
+ if (err) {
+ reset_control_release(pg->reset);
reset_control_put(pg->reset);
+ }
return err;
}
-static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct device *dev = pmc->dev;
struct tegra_powergate *pg;
- int id, err;
+ int id, err = 0;
bool off;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
- return;
+ return -ENOMEM;
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
+ err = -ENODEV;
goto free_mem;
}
@@ -1021,7 +1057,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
- return;
+ return 0;
remove_genpd:
pm_genpd_remove(&pg->genpd);
@@ -1040,25 +1076,67 @@ set_available:
free_mem:
kfree(pg);
+
+ return err;
}
-static void tegra_powergate_init(struct tegra_pmc *pmc,
- struct device_node *parent)
+static int tegra_powergate_init(struct tegra_pmc *pmc,
+ struct device_node *parent)
{
struct device_node *np, *child;
- unsigned int i;
+ int err = 0;
+
+ np = of_get_child_by_name(parent, "powergates");
+ if (!np)
+ return 0;
+
+ for_each_child_of_node(np, child) {
+ err = tegra_powergate_add(pmc, child);
+ if (err < 0) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return err;
+}
+
+static void tegra_powergate_remove(struct generic_pm_domain *genpd)
+{
+ struct tegra_powergate *pg = to_powergate(genpd);
+
+ reset_control_put(pg->reset);
+
+ while (pg->num_clks--)
+ clk_put(pg->clks[pg->num_clks]);
+
+ kfree(pg->clks);
- /* Create a bitmap of the available and valid partitions */
- for (i = 0; i < pmc->soc->num_powergates; i++)
- if (pmc->soc->powergates[i])
- set_bit(i, pmc->powergates_available);
+ set_bit(pg->id, pmc->powergates_available);
+
+ kfree(pg);
+}
+
+static void tegra_powergate_remove_all(struct device_node *parent)
+{
+ struct generic_pm_domain *genpd;
+ struct device_node *np, *child;
np = of_get_child_by_name(parent, "powergates");
if (!np)
return;
- for_each_child_of_node(np, child)
- tegra_powergate_add(pmc, child);
+ for_each_child_of_node(np, child) {
+ of_genpd_del_provider(child);
+
+ genpd = of_genpd_remove_last(child);
+ if (IS_ERR(genpd))
+ continue;
+
+ tegra_powergate_remove(genpd);
+ }
of_node_put(np);
}
@@ -1709,13 +1787,16 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
static ssize_t reset_reason_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u32 value, rst_src;
+ u32 value;
value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
- rst_src = (value & pmc->soc->regs->rst_source_mask) >>
- pmc->soc->regs->rst_source_shift;
+ value &= pmc->soc->regs->rst_source_mask;
+ value >>= pmc->soc->regs->rst_source_shift;
+
+ if (WARN_ON(value >= pmc->soc->num_reset_sources))
+ return sprintf(buf, "%s\n", "UNKNOWN");
- return sprintf(buf, "%s\n", pmc->soc->reset_sources[rst_src]);
+ return sprintf(buf, "%s\n", pmc->soc->reset_sources[value]);
}
static DEVICE_ATTR_RO(reset_reason);
@@ -1723,13 +1804,16 @@ static DEVICE_ATTR_RO(reset_reason);
static ssize_t reset_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u32 value, rst_lvl;
+ u32 value;
value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
- rst_lvl = (value & pmc->soc->regs->rst_level_mask) >>
- pmc->soc->regs->rst_level_shift;
+ value &= pmc->soc->regs->rst_level_mask;
+ value >>= pmc->soc->regs->rst_level_shift;
- return sprintf(buf, "%s\n", pmc->soc->reset_levels[rst_lvl]);
+ if (WARN_ON(value >= pmc->soc->num_reset_levels))
+ return sprintf(buf, "%s\n", "UNKNOWN");
+
+ return sprintf(buf, "%s\n", pmc->soc->reset_levels[value]);
}
static DEVICE_ATTR_RO(reset_level);
@@ -1999,7 +2083,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_powergate_debugfs_init();
if (err < 0)
- return err;
+ goto cleanup_sysfs;
}
err = register_restart_handler(&tegra_pmc_restart_handler);
@@ -2013,9 +2097,13 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (err)
goto cleanup_restart_handler;
+ err = tegra_powergate_init(pmc, pdev->dev.of_node);
+ if (err < 0)
+ goto cleanup_powergates;
+
err = tegra_pmc_irq_init(pmc);
if (err < 0)
- goto cleanup_restart_handler;
+ goto cleanup_powergates;
mutex_lock(&pmc->powergates_lock);
iounmap(pmc->base);
@@ -2026,10 +2114,15 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return 0;
+cleanup_powergates:
+ tegra_powergate_remove_all(pdev->dev.of_node);
cleanup_restart_handler:
unregister_restart_handler(&tegra_pmc_restart_handler);
cleanup_debugfs:
debugfs_remove(pmc->debugfs);
+cleanup_sysfs:
+ device_remove_file(&pdev->dev, &dev_attr_reset_reason);
+ device_remove_file(&pdev->dev, &dev_attr_reset_level);
return err;
}
@@ -2185,7 +2278,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
.reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2236,7 +2329,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
.reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2347,7 +2440,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
.reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2452,8 +2545,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
- .reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .reset_sources = tegra210_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra210_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2578,9 +2671,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
.reset_sources = tegra186_reset_sources,
- .num_reset_sources = 14,
+ .num_reset_sources = ARRAY_SIZE(tegra186_reset_sources),
.reset_levels = tegra186_reset_levels,
- .num_reset_levels = 3,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra186_wake_events),
.wake_events = tegra186_wake_events,
};
@@ -2719,6 +2812,7 @@ static int __init tegra_pmc_early_init(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
+ unsigned int i;
bool invert;
mutex_init(&pmc->powergates_lock);
@@ -2775,7 +2869,10 @@ static int __init tegra_pmc_early_init(void)
if (pmc->soc->maybe_tz_only)
pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
- tegra_powergate_init(pmc, np);
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
/*
* Invert the interrupt polarity if a PMC device tree node
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index be4570baad96..57960e92ebe0 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -45,11 +45,12 @@ config KEYSTONE_NAVIGATOR_DMA
config AMX3_PM
tristate "AMx3 Power Management"
depends on SOC_AM33XX || SOC_AM43XX
- depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM
+ depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM && RTC_DRV_OMAP
help
Enable power management on AM335x and AM437x. Required for suspend to mem
and standby states on both AM335x and AM437x platforms and for deeper cpuidle
- c-states on AM335x.
+	  c-states on AM335x. Also required for RTC and DDR in self-refresh low
+	  power mode on AM437x platforms.
config WKUP_M3_IPC
tristate "TI AMx3 Wkup-M3 IPC Driver"
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index d0dab323651f..fc5802ccb1c0 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -6,6 +6,7 @@
* Vaibhav Bedia, Dave Gerlach
*/
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/genalloc.h>
@@ -13,9 +14,12 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
#include <linux/sizes.h>
#include <linux/sram.h>
#include <linux/suspend.h>
@@ -29,33 +33,162 @@
#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
(unsigned long)pm_sram->do_wfi)
+#define RTC_SCRATCH_RESUME_REG 0
+#define RTC_SCRATCH_MAGIC_REG 1
+#define RTC_REG_BOOT_MAGIC 0x8cd0 /* RTC */
+#define GIC_INT_SET_PENDING_BASE 0x200
+#define AM43XX_GIC_DIST_BASE 0x48241000
+
+static u32 rtc_magic_val;
+
static int (*am33xx_do_wfi_sram)(unsigned long unused);
static phys_addr_t am33xx_do_wfi_sram_phys;
static struct gen_pool *sram_pool, *sram_pool_data;
static unsigned long ocmcram_location, ocmcram_location_data;
+static struct rtc_device *omap_rtc;
+static void __iomem *gic_dist_base;
+
static struct am33xx_pm_platform_data *pm_ops;
static struct am33xx_pm_sram_addr *pm_sram;
static struct device *pm33xx_dev;
static struct wkup_m3_ipc *m3_ipc;
+#ifdef CONFIG_SUSPEND
+static int rtc_only_idle;
+static int retrigger_irq;
static unsigned long suspend_wfi_flags;
+static struct wkup_m3_wakeup_src wakeup_src = {.irq_nr = 0,
+ .src = "Unknown",
+};
+
+static struct wkup_m3_wakeup_src rtc_alarm_wakeup = {
+ .irq_nr = 108, .src = "RTC Alarm",
+};
+
+static struct wkup_m3_wakeup_src rtc_ext_wakeup = {
+ .irq_nr = 0, .src = "Ext wakeup",
+};
+#endif
+
static u32 sram_suspend_address(unsigned long addr)
{
return ((unsigned long)am33xx_do_wfi_sram +
AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
}
+static int am33xx_push_sram_idle(void)
+{
+ struct am33xx_pm_ro_sram_data ro_sram_data;
+ int ret;
+ u32 table_addr, ro_data_addr;
+ void *copy_addr;
+
+ ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
+ ro_sram_data.amx3_pm_sram_data_phys =
+ gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+ ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+
+ /* Save physical address to calculate resume offset during pm init */
+ am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
+ ocmcram_location);
+
+ am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
+ pm_sram->do_wfi,
+ *pm_sram->do_wfi_sz);
+ if (!am33xx_do_wfi_sram) {
+ dev_err(pm33xx_dev,
+ "PM: %s: am33xx_do_wfi copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ table_addr =
+ sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
+ ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
+ if (ret) {
+ dev_dbg(pm33xx_dev,
+ "PM: %s: EMIF function copy failed\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ ro_data_addr =
+ sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
+ copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
+ &ro_sram_data,
+ sizeof(ro_sram_data));
+ if (!copy_addr) {
+ dev_err(pm33xx_dev,
+ "PM: %s: ro_sram_data copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init am43xx_map_gic(void)
+{
+ gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
+
+ if (!gic_dist_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
#ifdef CONFIG_SUSPEND
+struct wkup_m3_wakeup_src rtc_wake_src(void)
+{
+ u32 i;
+
+ i = __raw_readl(pm_ops->get_rtc_base_addr() + 0x44) & 0x40;
+
+ if (i) {
+ retrigger_irq = rtc_alarm_wakeup.irq_nr;
+ return rtc_alarm_wakeup;
+ }
+
+ retrigger_irq = rtc_ext_wakeup.irq_nr;
+
+ return rtc_ext_wakeup;
+}
+
+int am33xx_rtc_only_idle(unsigned long wfi_flags)
+{
+ omap_rtc_power_off_program(&omap_rtc->dev);
+ am33xx_do_wfi_sram(wfi_flags);
+ return 0;
+}
+
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
int i, ret = 0;
- ret = pm_ops->soc_suspend((unsigned long)suspend_state,
- am33xx_do_wfi_sram, suspend_wfi_flags);
+ if (suspend_state == PM_SUSPEND_MEM &&
+ pm_ops->check_off_mode_enable()) {
+ pm_ops->prepare_rtc_suspend();
+ pm_ops->save_context();
+ suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
+ clk_save_context();
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_rtc_only_idle,
+ suspend_wfi_flags);
+
+ suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY;
+
+ if (!ret) {
+ clk_restore_context();
+ pm_ops->restore_context();
+ m3_ipc->ops->set_rtc_only(m3_ipc);
+ am33xx_push_sram_idle();
+ }
+ } else {
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_do_wfi_sram,
+ suspend_wfi_flags);
+ }
if (ret) {
dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
@@ -77,8 +210,20 @@ static int am33xx_pm_suspend(suspend_state_t suspend_state)
"PM: CM3 returned unknown result = %d\n", i);
ret = -1;
}
+
+ /* print the wakeup reason */
+ if (rtc_only_idle) {
+ wakeup_src = rtc_wake_src();
+ pr_info("PM: Wakeup source %s\n", wakeup_src.src);
+ } else {
+ pr_info("PM: Wakeup source %s\n",
+ m3_ipc->ops->request_wake_src(m3_ipc));
+ }
}
+ if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
+ pm_ops->prepare_rtc_resume();
+
return ret;
}
@@ -101,6 +246,18 @@ static int am33xx_pm_enter(suspend_state_t suspend_state)
static int am33xx_pm_begin(suspend_state_t state)
{
int ret = -EINVAL;
+ struct nvmem_device *nvmem;
+
+ if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (nvmem)
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&rtc_magic_val);
+ rtc_only_idle = 1;
+ } else {
+ rtc_only_idle = 0;
+ }
switch (state) {
case PM_SUSPEND_MEM:
@@ -116,7 +273,28 @@ static int am33xx_pm_begin(suspend_state_t state)
static void am33xx_pm_end(void)
{
+ u32 val = 0;
+ struct nvmem_device *nvmem;
+
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
m3_ipc->ops->finish_low_power(m3_ipc);
+ if (rtc_only_idle) {
+ if (retrigger_irq)
+			/*
+			 * Each 32-bit Interrupt Set-Pending register covers
+			 * 32 interrupts. Compute the bit offset of the
+			 * interrupt and set that particular bit.
+			 * Compute the register offset by dividing the
+			 * interrupt number by 32 and multiplying by 4.
+			 */
+ writel_relaxed(1 << (retrigger_irq & 31),
+ gic_dist_base + GIC_INT_SET_PENDING_BASE
+ + retrigger_irq / 32 * 4);
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&val);
+ }
+
+ rtc_only_idle = 0;
}
static int am33xx_pm_valid(suspend_state_t state)
@@ -219,51 +397,37 @@ mpu_put_node:
return ret;
}
-static int am33xx_push_sram_idle(void)
+static int am33xx_pm_rtc_setup(void)
{
- struct am33xx_pm_ro_sram_data ro_sram_data;
- int ret;
- u32 table_addr, ro_data_addr;
- void *copy_addr;
-
- ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
- ro_sram_data.amx3_pm_sram_data_phys =
- gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
- ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+ struct device_node *np;
+ unsigned long val = 0;
+ struct nvmem_device *nvmem;
- /* Save physical address to calculate resume offset during pm init */
- am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
- ocmcram_location);
+ np = of_find_node_by_name(NULL, "rtc");
- am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
- pm_sram->do_wfi,
- *pm_sram->do_wfi_sz);
- if (!am33xx_do_wfi_sram) {
- dev_err(pm33xx_dev,
- "PM: %s: am33xx_do_wfi copy to sram failed\n",
- __func__);
- return -ENODEV;
- }
-
- table_addr =
- sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
- ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
- if (ret) {
- dev_dbg(pm33xx_dev,
- "PM: %s: EMIF function copy failed\n", __func__);
- return -EPROBE_DEFER;
- }
+ if (of_device_is_available(np)) {
+ omap_rtc = rtc_class_open("rtc0");
+ if (!omap_rtc) {
+ pr_warn("PM: rtc0 not available");
+ return -EPROBE_DEFER;
+ }
- ro_data_addr =
- sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
- copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
- &ro_sram_data,
- sizeof(ro_sram_data));
- if (!copy_addr) {
- dev_err(pm33xx_dev,
- "PM: %s: ro_sram_data copy to sram failed\n",
- __func__);
- return -ENODEV;
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (nvmem) {
+ nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&rtc_magic_val);
+ if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
+ pr_warn("PM: bootloader does not support rtc-only!\n");
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&val);
+ val = pm_sram->resume_address;
+ nvmem_device_write(nvmem, RTC_SCRATCH_RESUME_REG * 4,
+ 4, (void *)&val);
+ }
+ } else {
+ pr_warn("PM: no-rtc available, rtc-only mode disabled.\n");
}
return 0;
@@ -284,34 +448,42 @@ static int am33xx_pm_probe(struct platform_device *pdev)
return -ENODEV;
}
+ ret = am43xx_map_gic();
+ if (ret) {
+ pr_err("PM: Could not ioremap GIC base\n");
+ return ret;
+ }
+
pm_sram = pm_ops->get_sram_addrs();
if (!pm_sram) {
dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
return -ENODEV;
}
+ m3_ipc = wkup_m3_ipc_get();
+ if (!m3_ipc) {
+ pr_err("PM: Cannot get wkup_m3_ipc handle\n");
+ return -EPROBE_DEFER;
+ }
+
pm33xx_dev = dev;
ret = am33xx_pm_alloc_sram();
if (ret)
return ret;
- ret = am33xx_push_sram_idle();
+ ret = am33xx_pm_rtc_setup();
if (ret)
goto err_free_sram;
- m3_ipc = wkup_m3_ipc_get();
- if (!m3_ipc) {
- dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n");
- ret = -EPROBE_DEFER;
+ ret = am33xx_push_sram_idle();
+ if (ret)
goto err_free_sram;
- }
am33xx_pm_set_ipc_ops();
#ifdef CONFIG_SUSPEND
suspend_set_ops(&am33xx_pm_ops);
-#endif /* CONFIG_SUSPEND */
/*
* For a system suspend we must flush the caches, we want
@@ -323,6 +495,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
+#endif /* CONFIG_SUSPEND */
ret = pm_ops->init();
if (ret) {
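A standalone illustration of the GIC Set-Pending arithmetic used in am33xx_pm_end() above when the wakeup interrupt must be retriggered after an rtc-only resume. It reproduces only the offset/bit computation, with the values visible in the hunk (GIC_INT_SET_PENDING_BASE 0x200, RTC alarm irq 108); it is not driver code.

#include <stdio.h>

#define GIC_INT_SET_PENDING_BASE	0x200

int main(void)
{
	unsigned int irq = 108;	/* RTC alarm, as in rtc_alarm_wakeup above */
	unsigned long reg_off = GIC_INT_SET_PENDING_BASE + irq / 32 * 4;
	unsigned int bit = irq & 31;

	/* Prints: GICD_ISPENDRn offset 0x20c, bit 12 */
	printf("GICD_ISPENDRn offset 0x%lx, bit %u\n", reg_off, bit);
	return 0;
}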
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 354d256e6e00..600f57cf0c2e 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -23,6 +23,8 @@
/* Flag stating if PM nodes mapped to the PM domain have been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
+static const struct zynqmp_eemi_ops *eemi_ops;
+
/**
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
@@ -71,9 +73,8 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
int ret;
struct zynqmp_pm_domain *pd;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->set_requirement)
+ if (!eemi_ops->set_requirement)
return -ENXIO;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -107,9 +108,8 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
struct zynqmp_pm_domain *pd;
u32 capabilities = 0;
bool may_wakeup;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->set_requirement)
+ if (!eemi_ops->set_requirement)
return -ENXIO;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -160,9 +160,8 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
{
int ret;
struct zynqmp_pm_domain *pd;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->request_node)
+ if (!eemi_ops->request_node)
return -ENXIO;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -197,9 +196,8 @@ static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
{
int ret;
struct zynqmp_pm_domain *pd;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->release_node)
+ if (!eemi_ops->release_node)
return;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -266,6 +264,10 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 771cb59b9d22..1b9d14411a15 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -31,6 +31,7 @@ static const char *const suspend_modes[] = {
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
@@ -92,9 +93,8 @@ static ssize_t suspend_mode_store(struct device *dev,
const char *buf, size_t count)
{
int md, ret = -EINVAL;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->set_suspend_mode)
+ if (!eemi_ops->set_suspend_mode)
return ret;
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
@@ -120,9 +120,11 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
int ret, irq;
u32 pm_api_version;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
- if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize)
+ if (!eemi_ops->get_api_version || !eemi_ops->init_finalize)
return -ENXIO;
eemi_ops->init_finalize();
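The zynqmp hunks above switch from calling zynqmp_pm_get_eemi_ops() in every callback to fetching the ops table once at probe time and propagating an IS_ERR() result. The userspace sketch below shows that pattern; the ERR_PTR/IS_ERR macros, the ops structure, and get_eemi_ops() are simplified mock-ups for illustration, not the firmware driver's real API.

#include <errno.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR helpers. */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

struct eemi_ops {
	int (*set_suspend_mode)(unsigned int mode);
};

static const struct eemi_ops *eemi_ops;

/* Mock of zynqmp_pm_get_eemi_ops(); return e.g. ERR_PTR(-ENXIO) to exercise the failure path. */
static const struct eemi_ops *get_eemi_ops(void)
{
	static const struct eemi_ops ops = { .set_suspend_mode = NULL };
	return &ops;
}

static int probe(void)
{
	eemi_ops = get_eemi_ops();
	if (IS_ERR(eemi_ops))
		return PTR_ERR(eemi_ops);	/* fail or defer the probe once */
	return 0;
}

static int suspend_mode_store(unsigned int mode)
{
	/* Callbacks now only check the individual op, not eemi_ops itself. */
	if (!eemi_ops->set_suspend_mode)
		return -ENXIO;
	return eemi_ops->set_suspend_mode(mode);
}

int main(void)
{
	printf("probe: %d, store: %d\n", probe(), suspend_mode_store(1));
	return 0;
}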
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 3912526ead66..cdb613d38062 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 9f83e1b17aa1..9850a0efe85a 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -138,6 +139,7 @@
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+static const struct zynqmp_eemi_ops *eemi_ops;
/**
* struct zynqmp_qspi - Defines qspi driver instance
@@ -1021,6 +1023,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
index 600928f63577..d35c4fb19e28 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -486,8 +486,8 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
off + i * PAGE_SIZE;
} else {
- ret = get_user_pages_fast(page_addr - offset, 1, 1,
- &page);
+ ret = get_user_pages_fast(page_addr - offset, 1,
+ FOLL_WRITE, &page);
if (ret <= 0) {
dev_err(pg_tbl->device,
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 1ba4a5154fb5..64037b0a8387 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1266,7 +1266,7 @@ static int prp_registered(struct v4l2_subdev *sd)
if (ret)
return ret;
- ret = imx_media_capture_device_register(priv->vdev);
+ ret = imx_media_capture_device_register(priv->md, priv->vdev);
if (ret)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b7ce9d439279..9430c835c434 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -701,7 +701,8 @@ void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
-int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
+int imx_media_capture_device_register(struct imx_media_dev *md,
+ struct imx_media_video_dev *vdev)
{
struct capture_priv *priv = to_capture_priv(vdev);
struct v4l2_subdev *sd = priv->src_sd;
@@ -710,8 +711,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
struct v4l2_subdev_format fmt_src;
int ret;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+ priv->md = md;
vfd->v4l2_dev = sd->v4l2_dev;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 28fe66052cc7..1d248aca40a9 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1812,7 +1812,7 @@ static int csi_registered(struct v4l2_subdev *sd)
if (ret)
goto free_fim;
- ret = imx_media_capture_device_register(priv->vdev);
+ ret = imx_media_capture_device_register(priv->md, priv->vdev);
if (ret)
goto free_fim;
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index eb59ba0c3b62..6587aa49e005 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -268,7 +268,8 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct imx_media_video_dev *
imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad);
void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
-int imx_media_capture_device_register(struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_dev *md,
+ struct imx_media_video_dev *vdev);
void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 18eb5d3ecf10..a708a0340eb1 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1126,7 +1126,7 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = imx_media_capture_device_register(csi->vdev);
+ ret = imx_media_capture_device_register(csi->imxmd, csi->vdev);
if (ret < 0)
return ret;
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
index 58721c46fba4..8bbc905b26c8 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
@@ -352,7 +352,7 @@ static int rockchip_vpu_video_device_register(struct rockchip_vpu_dev *vpu)
vpu->vfd_enc = vfd;
video_set_drvdata(vfd, vpu);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
goto err_free_dev;
@@ -463,6 +463,8 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
vpu->mdev.dev = vpu->dev;
strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+	strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
+		sizeof(vpu->mdev.bus_info));
media_device_init(&vpu->mdev);
vpu->v4l2_dev.mdev = &vpu->mdev;
@@ -480,15 +482,18 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
return 0;
err_video_dev_unreg:
if (vpu->vfd_enc) {
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
video_unregister_device(vpu->vfd_enc);
video_device_release(vpu->vfd_enc);
}
err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return ret;
}
@@ -500,15 +505,16 @@ static int rockchip_vpu_remove(struct platform_device *pdev)
v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
media_device_unregister(&vpu->mdev);
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- v4l2_m2m_release(vpu->m2m_dev);
- media_device_cleanup(&vpu->mdev);
if (vpu->vfd_enc) {
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
video_unregister_device(vpu->vfd_enc);
video_device_release(vpu->vfd_enc);
}
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
v4l2_device_unregister(&vpu->v4l2_dev);
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return 0;
}
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
index fb5e36aedd8c..dcbfc3cbc9f3 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
@@ -152,9 +152,10 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct rockchip_vpu_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
- strscpy(cap->card, vpu->vfd_enc->name, sizeof(cap->card));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
vpu->dev->driver->name);
return 0;
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index 255e266c40e1..f5c716bb3413 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -3,7 +3,6 @@ config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
depends on I2C
- depends on BACKLIGHT_LCD_SUPPORT
depends on (GPIO_CS5535 || GPIO_CS5535=n)
select BACKLIGHT_CLASS_DEVICE
help
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 0842b6e6af82..48963eab32f5 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -419,9 +419,35 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return true;
}
+static struct tee_shm_pool *optee_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
- u32 sec_caps)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
union {
struct arm_smccc_res smccc;
@@ -436,10 +462,11 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
struct tee_shm_pool_mgr *priv_mgr;
struct tee_shm_pool_mgr *dmabuf_mgr;
void *rc;
+ const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK) {
- pr_info("shm service not available\n");
+ pr_err("static shm service not available\n");
return ERR_PTR(-ENOENT);
}
@@ -465,28 +492,15 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
}
vaddr = (unsigned long)va;
- /*
- * If OP-TEE can work with unregistered SHM, we will use own pool
- * for private shm
- */
- if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
- } else {
- const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
- 3 /* 8 bytes aligned */);
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
-
- vaddr += sz;
- paddr += sz;
- size -= sz;
- }
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
if (IS_ERR(rc))
@@ -552,7 +566,7 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np)
static struct optee *optee_probe(struct device_node *np)
{
optee_invoke_fn *invoke_fn;
- struct tee_shm_pool *pool;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
@@ -581,13 +595,17 @@ static struct optee *optee_probe(struct device_node *np)
}
/*
- * We have no other option for shared memory, if secure world
- * doesn't have any reserved memory we can use we can't continue.
+ * Try to use dynamic shared memory if possible
*/
- if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
- return ERR_PTR(-EINVAL);
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pool = optee_config_dyn_shm();
+
+ /*
+	 * If dynamic shared memory is not available or its setup failed, try the static one
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
if (IS_ERR(pool))
return (void *)pool;
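A small userspace sketch of the pool-selection flow the optee_probe() hunk above introduces: start from an error value, try the dynamic pool only when the capability bit is set, and fall back to the static pool if dynamic setup is unavailable or fails. The capability flags and pool constructors here are mock-ups for illustration only.

#include <errno.h>
#include <stdio.h>

#define CAP_DYNAMIC_SHM		(1u << 0)
#define CAP_RESERVED_SHM	(1u << 1)

/* Mocked pool constructors: 0 on success, negative errno on failure. */
static int config_dyn_shm(void)    { return -ENOMEM; /* pretend it failed */ }
static int config_static_shm(void) { return 0; }

static int pick_pool(unsigned int sec_caps)
{
	int rc = -EINVAL;			/* mirrors pool = ERR_PTR(-EINVAL) */

	if (sec_caps & CAP_DYNAMIC_SHM)
		rc = config_dyn_shm();		/* preferred: dynamic shm */
	if (rc < 0 && (sec_caps & CAP_RESERVED_SHM))
		rc = config_static_shm();	/* fallback: static shm */
	return rc;
}

int main(void)
{
	/* Prints 0: dynamic setup failed, the static pool was used instead. */
	printf("rc = %d\n", pick_pool(CAP_DYNAMIC_SHM | CAP_RESERVED_SHM));
	return 0;
}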
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 0b9ab1d0dd45..49fd7312e2aa 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -273,7 +273,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
- rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+ rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
if (rc > 0)
shm->num_pages = rc;
if (rc != num_pages) {
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 653aa27a25a4..15bdd25780be 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig THERMAL
- tristate "Generic Thermal sysfs driver"
+ bool "Generic Thermal sysfs driver"
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
@@ -11,7 +11,7 @@ menuconfig THERMAL
Each thermal zone contains its own temperature, trip points,
cooling devices.
All platforms with ACPI thermal support can use this driver.
- If you want this support, you should say Y or M here.
+ If you want this support, you should say Y here.
if THERMAL
@@ -24,7 +24,6 @@ config THERMAL_STATISTICS
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
- depends on THERMAL
default 0
help
Thermal subsystem will issue a graceful shutdown when
@@ -149,10 +148,9 @@ config THERMAL_GOV_POWER_ALLOCATOR
allocating and limiting power to devices.
config CPU_THERMAL
- bool "generic cpu cooling support"
+ bool "Generic cpu cooling support"
depends on CPU_FREQ
depends on THERMAL_OF
- depends on THERMAL=y
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
@@ -200,6 +198,17 @@ config THERMAL_EMULATION
because userland can easily disable the thermal policy by simply
flooding this sysfs node with low temperature values.
+config THERMAL_MMIO
+ tristate "Generic Thermal MMIO driver"
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This option enables the generic thermal MMIO driver that will use
+	  memory-mapped reads to get the temperature. Any HW/system that
+	  allows the temperature to be read with a single memory-mapped
+	  read, be it a register or shared memory, is a potential candidate
+	  to work with this driver.
+
config HISI_THERMAL
tristate "Hisilicon thermal driver"
depends on ARCH_HISI || COMPILE_TEST
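For the new THERMAL_MMIO option above, "a single memory-mapped read" boils down to a get_temp path that performs one register read and scales it. The sketch below simulates that with a plain variable standing in for an ioremap()ed register; it is an illustration of the idea, not the actual drivers/thermal/thermal_mmio.c code, and the mask/factor fields are assumptions.

#include <stdint.h>
#include <stdio.h>

struct mmio_sensor {
	volatile uint32_t *reg;		/* would be an ioremap()ed register */
	uint32_t mask;			/* valid temperature bits */
	int factor;			/* raw unit -> millidegrees Celsius */
};

static int mmio_get_temp(const struct mmio_sensor *s)
{
	/* One memory-mapped read, then a fixed per-sensor scaling. */
	return (int)(*s->reg & s->mask) * s->factor;
}

int main(void)
{
	uint32_t fake_reg = 42;		/* pretend the hardware reports 42 C */
	struct mmio_sensor s = { .reg = &fake_reg, .mask = 0xff, .factor = 1000 };

	printf("%d mC\n", mmio_get_temp(&s));	/* prints 42000 */
	return 0;
}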
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 486d682be047..74a37c7f847a 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -29,6 +29,7 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
# platform thermal drivers
obj-y += broadcom/
+obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 2284cbecedf3..475ce2900771 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -3,7 +3,6 @@
* Copyright (C) 2018 Broadcom
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -100,18 +99,11 @@ static const struct of_device_id sr_thermal_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sr_thermal_of_match);
-static const struct acpi_device_id sr_thermal_acpi_ids[] = {
- { .id = "BRCM0500" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(acpi, sr_thermal_acpi_ids);
-
static struct platform_driver sr_thermal_driver = {
.probe = sr_thermal_probe,
.driver = {
.name = "sr-thermal",
.of_match_table = sr_thermal_of_match,
- .acpi_match_table = ACPI_PTR(sr_thermal_acpi_ids),
},
};
module_platform_driver(sr_thermal_driver);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f7c1f49ec87f..4c5db59a619b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -1,26 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/thermal/cpu_cooling.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
- * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ * Copyright (C) 2012-2018 Linaro Limited.
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * Authors: Amit Daniel <amit.kachhap@linaro.org>
+ * Viresh Kumar <viresh.kumar@linaro.org>
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/thermal.h>
@@ -99,7 +87,6 @@ struct cpufreq_cooling_device {
unsigned int clipped_freq;
unsigned int max_level;
struct freq_table *freq_table; /* In descending order */
- struct thermal_cooling_device *cdev;
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
@@ -207,8 +194,7 @@ static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
dev = get_cpu_device(cpu);
if (unlikely(!dev)) {
- dev_warn(&cpufreq_cdev->cdev->device,
- "No cpu device for cpu %d\n", cpu);
+ pr_warn("No cpu device for cpu %d\n", cpu);
return -ENODEV;
}
@@ -458,7 +444,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
load = 0;
total_load += load;
- if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
+ if (load_cpu)
load_cpu[i] = load;
i++;
@@ -541,7 +527,6 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
- power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
@@ -692,7 +677,6 @@ __cpufreq_cooling_register(struct device_node *np,
goto remove_ida;
cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
- cpufreq_cdev->cdev = cdev;
mutex_lock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
@@ -810,7 +794,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- thermal_cooling_device_unregister(cpufreq_cdev->cdev);
+ thermal_cooling_device_unregister(cdev);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index 2e013eeb4a1d..2c727a820759 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -1,6 +1,5 @@
config INTEL_POWERCLAMP
tristate "Intel PowerClamp idle injection driver"
- depends on THERMAL
depends on X86
depends on CPU_SUP_INTEL
help
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 0c19fcd56a0d..79a7df2baa92 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -220,6 +220,7 @@ static int int3403_add(struct platform_device *pdev)
{
struct int3403_priv *priv;
int result = 0;
+ unsigned long long tmp;
acpi_status status;
priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv),
@@ -234,19 +235,18 @@ static int int3403_add(struct platform_device *pdev)
goto err;
}
- status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
- NULL, &priv->type);
- if (ACPI_FAILURE(status)) {
- unsigned long long tmp;
- status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
- NULL, &tmp);
+ status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
+ NULL, &priv->type);
if (ACPI_FAILURE(status)) {
result = -EINVAL;
goto err;
- } else {
- priv->type = INT3403_TYPE_SENSOR;
}
+ } else {
+ priv->type = INT3403_TYPE_SENSOR;
}
platform_set_drvdata(pdev, priv);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 8e1cf4d789be..2e6071a82da2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -81,22 +81,13 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct pci_dev *pci_dev; \
- struct platform_device *pdev; \
- struct proc_thermal_device *proc_dev; \
+ struct proc_thermal_device *proc_dev = dev_get_drvdata(dev); \
\
if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
return 0; \
} \
\
- if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
- pdev = to_platform_device(dev); \
- proc_dev = platform_get_drvdata(pdev); \
- } else { \
- pci_dev = to_pci_dev(dev); \
- proc_dev = pci_get_drvdata(pci_dev); \
- } \
return sprintf(buf, "%lu\n",\
(unsigned long)proc_dev->power_limits[index].suffix * 1000); \
}
@@ -274,7 +265,7 @@ static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED);
break;
default:
- dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+ dev_dbg(proc_priv->dev, "Unsupported event [0x%x]\n", event);
break;
}
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 2df059cc07e2..dc5093be553e 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -5,6 +5,9 @@
* Copyright (C) 2013 Texas Instruments
* Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index cdb455ffd575..3ce20fec86a2 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -1,6 +1,5 @@
config QCOM_TSENS
tristate "Qualcomm TSENS Temperature Alarm"
- depends on THERMAL
depends on QCOM_QFPROM
depends on ARCH_QCOM || COMPILE_TEST
help
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 717a08600bb5..fc6fe50cdde4 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
-qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-v2.o
+
+qcom_tsens-y += tsens.o tsens-common.o tsens-v0_1.o \
+ tsens-8960.o tsens-v2.o tsens-v1.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
deleted file mode 100644
index c6dd620ac029..000000000000
--- a/drivers/thermal/qcom/tsens-8916.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/platform_device.h>
-#include "tsens.h"
-
-/* eeprom layout data for 8916 */
-#define BASE0_MASK 0x0000007f
-#define BASE1_MASK 0xfe000000
-#define BASE0_SHIFT 0
-#define BASE1_SHIFT 25
-
-#define S0_P1_MASK 0x00000f80
-#define S1_P1_MASK 0x003e0000
-#define S2_P1_MASK 0xf8000000
-#define S3_P1_MASK 0x000003e0
-#define S4_P1_MASK 0x000f8000
-
-#define S0_P2_MASK 0x0001f000
-#define S1_P2_MASK 0x07c00000
-#define S2_P2_MASK 0x0000001f
-#define S3_P2_MASK 0x00007c00
-#define S4_P2_MASK 0x01f00000
-
-#define S0_P1_SHIFT 7
-#define S1_P1_SHIFT 17
-#define S2_P1_SHIFT 27
-#define S3_P1_SHIFT 5
-#define S4_P1_SHIFT 15
-
-#define S0_P2_SHIFT 12
-#define S1_P2_SHIFT 22
-#define S2_P2_SHIFT 0
-#define S3_P2_SHIFT 10
-#define S4_P2_SHIFT 20
-
-#define CAL_SEL_MASK 0xe0000000
-#define CAL_SEL_SHIFT 29
-
-static int calibrate_8916(struct tsens_device *tmdev)
-{
- int base0 = 0, base1 = 0, i;
- u32 p1[5], p2[5];
- int mode = 0;
- u32 *qfprom_cdata, *qfprom_csel;
-
- qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
- if (IS_ERR(qfprom_cdata))
- return PTR_ERR(qfprom_cdata);
-
- qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
- if (IS_ERR(qfprom_csel))
- return PTR_ERR(qfprom_csel);
-
- mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
- dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
-
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
- p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
- p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
- for (i = 0; i < tmdev->num_sensors; i++)
- p2[i] = ((base1 + p2[i]) << 3);
- /* Fall through */
- case ONE_PT_CALIB2:
- base0 = (qfprom_cdata[0] & BASE0_MASK);
- p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
- for (i = 0; i < tmdev->num_sensors; i++)
- p1[i] = (((base0) + p1[i]) << 3);
- break;
- default:
- for (i = 0; i < tmdev->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
-
- compute_intercept_slope(tmdev, p1, p2, mode);
-
- return 0;
-}
-
-static const struct tsens_ops ops_8916 = {
- .init = init_common,
- .calibrate = calibrate_8916,
- .get_temp = get_temp_common,
-};
-
-const struct tsens_data data_8916 = {
- .num_sensors = 5,
- .ops = &ops_8916,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
- .hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
-};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 0f0adb302a7b..8d9b721dadb6 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -56,21 +56,21 @@
#define TRDY_MASK BIT(7)
#define TIMEOUT_US 100
-static int suspend_8960(struct tsens_device *tmdev)
+static int suspend_8960(struct tsens_priv *priv)
{
int ret;
unsigned int mask;
- struct regmap *map = tmdev->tm_map;
+ struct regmap *map = priv->tm_map;
- ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+ ret = regmap_read(map, THRESHOLD_ADDR, &priv->ctx.threshold);
if (ret)
return ret;
- ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+ ret = regmap_read(map, CNTL_ADDR, &priv->ctx.control);
if (ret)
return ret;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
mask = SLP_CLK_ENA | EN;
else
mask = SLP_CLK_ENA_8660 | EN;
@@ -82,10 +82,10 @@ static int suspend_8960(struct tsens_device *tmdev)
return 0;
}
-static int resume_8960(struct tsens_device *tmdev)
+static int resume_8960(struct tsens_priv *priv)
{
int ret;
- struct regmap *map = tmdev->tm_map;
+ struct regmap *map = priv->tm_map;
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
if (ret)
@@ -95,80 +95,80 @@ static int resume_8960(struct tsens_device *tmdev)
* Separate CONFIG restore is not needed only for 8660 as
 * config is part of CTRL Addr and it's restored as such
*/
- if (tmdev->num_sensors > 1) {
+ if (priv->num_sensors > 1) {
ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
if (ret)
return ret;
}
- ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+ ret = regmap_write(map, THRESHOLD_ADDR, priv->ctx.threshold);
if (ret)
return ret;
- ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+ ret = regmap_write(map, CNTL_ADDR, priv->ctx.control);
if (ret)
return ret;
return 0;
}
-static int enable_8960(struct tsens_device *tmdev, int id)
+static int enable_8960(struct tsens_priv *priv, int id)
{
int ret;
u32 reg, mask;
- ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg);
+ ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
mask = BIT(id + SENSOR0_SHIFT);
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg | SW_RST);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else
reg |= mask | SLP_CLK_ENA_8660 | EN;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg);
if (ret)
return ret;
return 0;
}
-static void disable_8960(struct tsens_device *tmdev)
+static void disable_8960(struct tsens_priv *priv)
{
int ret;
u32 reg_cntl;
u32 mask;
- mask = GENMASK(tmdev->num_sensors - 1, 0);
+ mask = GENMASK(priv->num_sensors - 1, 0);
mask <<= SENSOR0_SHIFT;
mask |= EN;
- ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg_cntl);
+ ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg_cntl);
if (ret)
return;
reg_cntl &= ~mask;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
reg_cntl &= ~SLP_CLK_ENA;
else
reg_cntl &= ~SLP_CLK_ENA_8660;
- regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
}
-static int init_8960(struct tsens_device *tmdev)
+static int init_8960(struct tsens_priv *priv)
{
int ret, i;
u32 reg_cntl;
- tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
- if (!tmdev->tm_map)
+ priv->tm_map = dev_get_regmap(priv->dev, NULL);
+ if (!priv->tm_map)
return -ENODEV;
/*
@@ -177,21 +177,21 @@ static int init_8960(struct tsens_device *tmdev)
 * but the control registers stay in the same place, i.e.
* directly after the first 5 status registers.
*/
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
if (i >= 5)
- tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
- tmdev->sensor[i].status += i * 4;
+ priv->sensor[i].status = S0_STATUS_ADDR + 40;
+ priv->sensor[i].status += i * 4;
}
reg_cntl = SW_RST;
- ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
+ ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
if (ret)
return ret;
- if (tmdev->num_sensors > 1) {
+ if (priv->num_sensors > 1) {
reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
+ ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR,
CONFIG_MASK, CONFIG);
} else {
reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
@@ -199,30 +199,30 @@ static int init_8960(struct tsens_device *tmdev)
reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
}
- reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT;
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
reg_cntl |= EN;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
return 0;
}
-static int calibrate_8960(struct tsens_device *tmdev)
+static int calibrate_8960(struct tsens_priv *priv)
{
int i;
char *data;
- ssize_t num_read = tmdev->num_sensors;
- struct tsens_sensor *s = tmdev->sensor;
+ ssize_t num_read = priv->num_sensors;
+ struct tsens_sensor *s = priv->sensor;
- data = qfprom_read(tmdev->dev, "calib");
+ data = qfprom_read(priv->dev, "calib");
if (IS_ERR(data))
- data = qfprom_read(tmdev->dev, "calib_backup");
+ data = qfprom_read(priv->dev, "calib_backup");
if (IS_ERR(data))
return PTR_ERR(data);
@@ -243,21 +243,21 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
return adc_code * slope + offset;
}
-static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+static int get_temp_8960(struct tsens_priv *priv, int id, int *temp)
{
int ret;
u32 code, trdy;
- const struct tsens_sensor *s = &tmdev->sensor[id];
+ const struct tsens_sensor *s = &priv->sensor[id];
unsigned long timeout;
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
do {
- ret = regmap_read(tmdev->tm_map, INT_STATUS_ADDR, &trdy);
+ ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, &trdy);
if (ret)
return ret;
if (!(trdy & TRDY_MASK))
continue;
- ret = regmap_read(tmdev->tm_map, s->status, &code);
+ ret = regmap_read(priv->tm_map, s->status, &code);
if (ret)
return ret;
*temp = code_to_mdegC(code, s);
@@ -277,7 +277,7 @@ static const struct tsens_ops ops_8960 = {
.resume = resume_8960,
};
-const struct tsens_data data_8960 = {
+const struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index f80c73f11740..928e8e81ba69 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -12,18 +12,6 @@
#include <linux/regmap.h>
#include "tsens.h"
-/* SROT */
-#define TSENS_EN BIT(0)
-
-/* TM */
-#define STATUS_OFFSET 0x30
-#define SN_ADDR_OFFSET 0x4
-#define SN_ST_TEMP_MASK 0x3ff
-#define CAL_DEGC_PT1 30
-#define CAL_DEGC_PT2 120
-#define SLOPE_FACTOR 1000
-#define SLOPE_DEFAULT 3200
-
char *qfprom_read(struct device *dev, const char *cname)
{
struct nvmem_cell *cell;
@@ -46,18 +34,18 @@ char *qfprom_read(struct device *dev, const char *cname)
* and offset values are derived from tz->tzp->slope and tz->tzp->offset
* resp.
*/
-void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
u32 *p2, u32 mode)
{
int i;
int num, den;
- for (i = 0; i < tmdev->num_sensors; i++) {
- dev_dbg(tmdev->dev,
+ for (i = 0; i < priv->num_sensors; i++) {
+ dev_dbg(priv->dev,
"sensor%d - data_point1:%#x data_point2:%#x\n",
i, p1[i], p2[i]);
- tmdev->sensor[i].slope = SLOPE_DEFAULT;
+ priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
/*
* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
@@ -66,16 +54,30 @@ void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
num = p2[i] - p1[i];
num *= SLOPE_FACTOR;
den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
- tmdev->sensor[i].slope = num / den;
+ priv->sensor[i].slope = num / den;
}
- tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+ priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
(CAL_DEGC_PT1 *
- tmdev->sensor[i].slope);
- dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+ priv->sensor[i].slope);
+ dev_dbg(priv->dev, "offset:%d\n", priv->sensor[i].offset);
}
}
+bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id)
+{
+ u32 val;
+ int ret;
+
+ if ((hw_id > (priv->num_sensors - 1)) || (hw_id < 0))
+ return -EINVAL;
+ ret = regmap_field_read(priv->rf[SENSOR_EN], &val);
+ if (ret)
+ return ret;
+
+ return val & (1 << hw_id);
+}
+
static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
{
int degc, num, den;
@@ -95,18 +97,54 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
return degc;
}
-int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp)
{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int status_reg;
+ struct tsens_sensor *s = &priv->sensor[i];
+ u32 temp_idx = LAST_TEMP_0 + s->hw_id;
+ u32 valid_idx = VALID_0 + s->hw_id;
+ u32 last_temp = 0, valid, mask;
+ int ret;
+
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ while (!valid) {
+ /* Valid bit is 0 for 6 AHB clock cycles.
+ * At 19.2MHz, 1 AHB clock is ~60ns.
+ * We should enter this loop very, very rarely.
+ */
+ ndelay(400);
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ }
+
+ /* Valid bit is set, OK to read the temperature */
+ ret = regmap_field_read(priv->rf[temp_idx], &last_temp);
+ if (ret)
+ return ret;
+
+ if (priv->feat->adc) {
+ /* Convert temperature from ADC code to milliCelsius */
+ *temp = code_to_degc(last_temp, s) * 1000;
+ } else {
+ mask = GENMASK(priv->fields[LAST_TEMP_0].msb,
+ priv->fields[LAST_TEMP_0].lsb);
+ /* Convert temperature from deciCelsius to milliCelsius */
+ *temp = sign_extend32(last_temp, fls(mask) - 1) * 100;
+ }
+
+ return 0;
+}
+
+int get_temp_common(struct tsens_priv *priv, int i, int *temp)
+{
+ struct tsens_sensor *s = &priv->sensor[i];
int last_temp = 0, ret;
- status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
+ ret = regmap_field_read(priv->rf[LAST_TEMP_0 + s->hw_id], &last_temp);
if (ret)
return ret;
- last_temp = code & SN_ST_TEMP_MASK;
*temp = code_to_degc(last_temp, s) * 1000;
@@ -127,21 +165,21 @@ static const struct regmap_config tsens_srot_config = {
.reg_stride = 4,
};
-int __init init_common(struct tsens_device *tmdev)
+int __init init_common(struct tsens_priv *priv)
{
void __iomem *tm_base, *srot_base;
+ struct device *dev = priv->dev;
struct resource *res;
- u32 code;
- int ret;
- struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
- u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
+ u32 enabled;
+ int ret, i, j;
+ struct platform_device *op = of_find_device_by_node(priv->dev->of_node);
if (!op)
return -EINVAL;
if (op->num_resources > 1) {
/* DT with separate SROT and TM address space */
- tmdev->tm_offset = 0;
+ priv->tm_offset = 0;
res = platform_get_resource(op, IORESOURCE_MEM, 1);
srot_base = devm_ioremap_resource(&op->dev, res);
if (IS_ERR(srot_base)) {
@@ -149,16 +187,15 @@ int __init init_common(struct tsens_device *tmdev)
goto err_put_device;
}
- tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev, srot_base,
+ priv->srot_map = devm_regmap_init_mmio(dev, srot_base,
&tsens_srot_config);
- if (IS_ERR(tmdev->srot_map)) {
- ret = PTR_ERR(tmdev->srot_map);
+ if (IS_ERR(priv->srot_map)) {
+ ret = PTR_ERR(priv->srot_map);
goto err_put_device;
}
-
} else {
/* old DTs where SROT and TM were in a contiguous 2K block */
- tmdev->tm_offset = 0x1000;
+ priv->tm_offset = 0x1000;
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
@@ -168,19 +205,47 @@ int __init init_common(struct tsens_device *tmdev)
goto err_put_device;
}
- tmdev->tm_map = devm_regmap_init_mmio(tmdev->dev, tm_base, &tsens_config);
- if (IS_ERR(tmdev->tm_map)) {
- ret = PTR_ERR(tmdev->tm_map);
+ priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
+ if (IS_ERR(priv->tm_map)) {
+ ret = PTR_ERR(priv->tm_map);
goto err_put_device;
}
- if (tmdev->srot_map) {
- ret = regmap_read(tmdev->srot_map, ctrl_offset, &code);
- if (ret)
+ priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[TSENS_EN]);
+ if (IS_ERR(priv->rf[TSENS_EN])) {
+ ret = PTR_ERR(priv->rf[TSENS_EN]);
+ goto err_put_device;
+ }
+ ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
+ if (ret)
+ goto err_put_device;
+ if (!enabled) {
+ dev_err(dev, "tsens device is not enabled\n");
+ ret = -ENODEV;
+ goto err_put_device;
+ }
+
+ priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[SENSOR_EN]);
+ if (IS_ERR(priv->rf[SENSOR_EN])) {
+ ret = PTR_ERR(priv->rf[SENSOR_EN]);
+ goto err_put_device;
+ }
+ /* now alloc regmap_fields in tm_map */
+ for (i = 0, j = LAST_TEMP_0; i < priv->feat->max_sensors; i++, j++) {
+ priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[j]);
+ if (IS_ERR(priv->rf[j])) {
+ ret = PTR_ERR(priv->rf[j]);
goto err_put_device;
- if (!(code & TSENS_EN)) {
- dev_err(tmdev->dev, "tsens device is not enabled\n");
- ret = -ENODEV;
+ }
+ }
+ for (i = 0, j = VALID_0; i < priv->feat->max_sensors; i++, j++) {
+ priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[j]);
+ if (IS_ERR(priv->rf[j])) {
+ ret = PTR_ERR(priv->rf[j]);
goto err_put_device;
}
}
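A worked example of the intercept/slope computation documented in compute_intercept_slope() above, using the calibration constants visible in the tsens code (CAL_DEGC_PT1 = 30, CAL_DEGC_PT2 = 120, SLOPE_FACTOR = 1000) and the fallback code points p1 = 500, p2 = 780 that calibrate_8916() applies when no fused data is usable. This is standalone arithmetic only, not driver code.

#include <stdio.h>

#define CAL_DEGC_PT1	30
#define CAL_DEGC_PT2	120
#define SLOPE_FACTOR	1000

int main(void)
{
	int p1 = 500, p2 = 780;		/* ADC codes at 30 C and 120 C */
	int slope = (p2 - p1) * SLOPE_FACTOR / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	int offset = p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;

	/* Prints: slope = 3111, offset = 406670 */
	printf("slope = %d, offset = %d\n", slope, offset);
	return 0;
}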
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-v0_1.c
index 3d3fda3d731b..a319283c223f 100644
--- a/drivers/thermal/qcom/tsens-8974.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -6,6 +6,48 @@
#include <linux/platform_device.h>
#include "tsens.h"
+/* ----- SROT ------ */
+#define SROT_CTRL_OFF 0x0000
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0000
+#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
+#define TM_Sn_STATUS_OFF 0x0030
+#define TM_TRDY_OFF 0x005c
+
+/* eeprom layout data for 8916 */
+#define MSM8916_BASE0_MASK 0x0000007f
+#define MSM8916_BASE1_MASK 0xfe000000
+#define MSM8916_BASE0_SHIFT 0
+#define MSM8916_BASE1_SHIFT 25
+
+#define MSM8916_S0_P1_MASK 0x00000f80
+#define MSM8916_S1_P1_MASK 0x003e0000
+#define MSM8916_S2_P1_MASK 0xf8000000
+#define MSM8916_S3_P1_MASK 0x000003e0
+#define MSM8916_S4_P1_MASK 0x000f8000
+
+#define MSM8916_S0_P2_MASK 0x0001f000
+#define MSM8916_S1_P2_MASK 0x07c00000
+#define MSM8916_S2_P2_MASK 0x0000001f
+#define MSM8916_S3_P2_MASK 0x00007c00
+#define MSM8916_S4_P2_MASK 0x01f00000
+
+#define MSM8916_S0_P1_SHIFT 7
+#define MSM8916_S1_P1_SHIFT 17
+#define MSM8916_S2_P1_SHIFT 27
+#define MSM8916_S3_P1_SHIFT 5
+#define MSM8916_S4_P1_SHIFT 15
+
+#define MSM8916_S0_P2_SHIFT 12
+#define MSM8916_S1_P2_SHIFT 22
+#define MSM8916_S2_P2_SHIFT 0
+#define MSM8916_S3_P2_SHIFT 10
+#define MSM8916_S4_P2_SHIFT 20
+
+#define MSM8916_CAL_SEL_MASK 0xe0000000
+#define MSM8916_CAL_SEL_SHIFT 29
+
/* eeprom layout data for 8974 */
#define BASE1_MASK 0xff
#define S0_P1_MASK 0x3f00
@@ -91,7 +133,59 @@
#define BIT_APPEND 0x3
-static int calibrate_8974(struct tsens_device *tmdev)
+static int calibrate_8916(struct tsens_priv *priv)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata, *qfprom_csel;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ qfprom_csel = (u32 *)qfprom_read(priv->dev, "calib_sel");
+ if (IS_ERR(qfprom_csel))
+ return PTR_ERR(qfprom_csel);
+
+ mode = (qfprom_csel[0] & MSM8916_CAL_SEL_MASK) >> MSM8916_CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[1] & MSM8916_BASE1_MASK) >> MSM8916_BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & MSM8916_S0_P2_MASK) >> MSM8916_S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & MSM8916_S1_P2_MASK) >> MSM8916_S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & MSM8916_S2_P2_MASK) >> MSM8916_S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & MSM8916_S3_P2_MASK) >> MSM8916_S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 3);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & MSM8916_S1_P1_MASK) >> MSM8916_S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & MSM8916_S2_P1_MASK) >> MSM8916_S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & MSM8916_S3_P1_MASK) >> MSM8916_S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & MSM8916_S4_P1_MASK) >> MSM8916_S4_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 3);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
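
The p1[]/p2[] arrays filled in above hold the raw sensor codes expected at the two calibration temperatures; compute_intercept_slope() (in tsens-common.c, outside this hunk) turns them into a per-sensor slope and offset. A rough standalone sketch of that arithmetic, assuming the CAL_DEGC_PT1/CAL_DEGC_PT2/SLOPE_FACTOR/SLOPE_DEFAULT constants added to tsens.h further down; the exact rounding and sign conventions in the driver may differ:

#include <stdio.h>

#define CAL_DEGC_PT1	30	/* calibration point 1, degrees C */
#define CAL_DEGC_PT2	120	/* calibration point 2, degrees C */
#define SLOPE_FACTOR	1000
#define SLOPE_DEFAULT	3200

int main(void)
{
	unsigned int p1 = 504, p2 = 1220;	/* illustrative code points only */
	int slope, offset, code = 750;

	/* slope in (ADC code * SLOPE_FACTOR) per degree C */
	if (p2 > p1)
		slope = (int)(p2 - p1) * SLOPE_FACTOR / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	else
		slope = SLOPE_DEFAULT;

	/* offset anchors the line so that code p1 maps back to CAL_DEGC_PT1 */
	offset = (int)p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;

	/* convert a raw code to milli-degrees C along that line (~60.9 C here) */
	printf("%d mC\n", (code * SLOPE_FACTOR - offset) * 1000 / slope);
	return 0;
}
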
+
+static int calibrate_8974(struct tsens_priv *priv)
{
int base1 = 0, base2 = 0, i;
u32 p1[11], p2[11];
@@ -99,11 +193,11 @@ static int calibrate_8974(struct tsens_device *tmdev)
u32 *calib, *bkp;
u32 calib_redun_sel;
- calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+ calib = (u32 *)qfprom_read(priv->dev, "calib");
if (IS_ERR(calib))
return PTR_ERR(calib);
- bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+ bkp = (u32 *)qfprom_read(priv->dev, "calib_backup");
if (IS_ERR(bkp))
return PTR_ERR(bkp);
@@ -184,25 +278,25 @@ static int calibrate_8974(struct tsens_device *tmdev)
switch (mode) {
case ONE_PT_CALIB:
- for (i = 0; i < tmdev->num_sensors; i++)
+ for (i = 0; i < priv->num_sensors; i++)
p1[i] += (base1 << 2) | BIT_APPEND;
break;
case TWO_PT_CALIB:
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
p2[i] += base2;
p2[i] <<= 2;
p2[i] |= BIT_APPEND;
}
/* Fall through */
case ONE_PT_CALIB2:
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
p1[i] += base1;
p1[i] <<= 2;
p1[i] |= BIT_APPEND;
}
break;
default:
- for (i = 0; i < tmdev->num_sensors; i++)
+ for (i = 0; i < priv->num_sensors; i++)
p2[i] = 780;
p1[0] = 502;
p1[1] = 509;
@@ -218,19 +312,71 @@ static int calibrate_8974(struct tsens_device *tmdev)
break;
}
- compute_intercept_slope(tmdev, p1, p2, mode);
+ compute_intercept_slope(priv, p1, p2, mode);
return 0;
}
+/* v0.1: 8916, 8974 */
+
+static const struct tsens_features tsens_v0_1_feat = {
+ .ver_major = VER_0_1,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+};
+
+static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* No VERSION information */
+
+ /* CTRL_OFFSET */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
+ /* No VALID field on v0.1 */
+ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
+ REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
+ REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
+ /* No CRITICAL field on v0.1 */
+ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
+
+static const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+ .get_temp = get_temp_common,
+};
+
+const struct tsens_plat_data data_8916 = {
+ .num_sensors = 5,
+ .ops = &ops_8916,
+ .hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
+
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
+
static const struct tsens_ops ops_8974 = {
.init = init_common,
.calibrate = calibrate_8974,
.get_temp = get_temp_common,
};
-const struct tsens_data data_8974 = {
+const struct tsens_plat_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
new file mode 100644
index 000000000000..10b595d4f619
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include "tsens.h"
+
+/* ----- SROT ------ */
+#define SROT_HW_VER_OFF 0x0000
+#define SROT_CTRL_OFF 0x0004
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0000
+#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
+#define TM_Sn_STATUS_OFF 0x0044
+#define TM_TRDY_OFF 0x0084
+
+/* eeprom layout data for qcs404/405 (v1) */
+#define BASE0_MASK 0x000007f8
+#define BASE1_MASK 0x0007f800
+#define BASE0_SHIFT 3
+#define BASE1_SHIFT 11
+
+#define S0_P1_MASK 0x0000003f
+#define S1_P1_MASK 0x0003f000
+#define S2_P1_MASK 0x3f000000
+#define S3_P1_MASK 0x000003f0
+#define S4_P1_MASK 0x003f0000
+#define S5_P1_MASK 0x0000003f
+#define S6_P1_MASK 0x0003f000
+#define S7_P1_MASK 0x3f000000
+#define S8_P1_MASK 0x000003f0
+#define S9_P1_MASK 0x003f0000
+
+#define S0_P2_MASK 0x00000fc0
+#define S1_P2_MASK 0x00fc0000
+#define S2_P2_MASK_1_0 0xc0000000
+#define S2_P2_MASK_5_2 0x0000000f
+#define S3_P2_MASK 0x0000fc00
+#define S4_P2_MASK 0x0fc00000
+#define S5_P2_MASK 0x00000fc0
+#define S6_P2_MASK 0x00fc0000
+#define S7_P2_MASK_1_0 0xc0000000
+#define S7_P2_MASK_5_2 0x0000000f
+#define S8_P2_MASK 0x0000fc00
+#define S9_P2_MASK 0x0fc00000
+
+#define S0_P1_SHIFT 0
+#define S0_P2_SHIFT 6
+#define S1_P1_SHIFT 12
+#define S1_P2_SHIFT 18
+#define S2_P1_SHIFT 24
+#define S2_P2_SHIFT_1_0 30
+
+#define S2_P2_SHIFT_5_2 0
+#define S3_P1_SHIFT 4
+#define S3_P2_SHIFT 10
+#define S4_P1_SHIFT 16
+#define S4_P2_SHIFT 22
+
+#define S5_P1_SHIFT 0
+#define S5_P2_SHIFT 6
+#define S6_P1_SHIFT 12
+#define S6_P2_SHIFT 18
+#define S7_P1_SHIFT 24
+#define S7_P2_SHIFT_1_0 30
+
+#define S7_P2_SHIFT_5_2 0
+#define S8_P1_SHIFT 4
+#define S8_P2_SHIFT 10
+#define S9_P1_SHIFT 16
+#define S9_P2_SHIFT 22
+
+#define CAL_SEL_MASK 7
+#define CAL_SEL_SHIFT 0
+
+static int calibrate_v1(struct tsens_priv *priv)
+{
+ u32 base0 = 0, base1 = 0;
+ u32 p1[10], p2[10];
+ u32 mode = 0, lsb = 0, msb = 0;
+ u32 *qfprom_cdata;
+ int i;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ mode = (qfprom_cdata[4] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[4] & BASE1_MASK) >> BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+ /* This value is split over two registers, 2 bits and 4 bits */
+ lsb = (qfprom_cdata[0] & S2_P2_MASK_1_0) >> S2_P2_SHIFT_1_0;
+ msb = (qfprom_cdata[1] & S2_P2_MASK_5_2) >> S2_P2_SHIFT_5_2;
+ p2[2] = msb << 2 | lsb;
+ p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
+ p2[5] = (qfprom_cdata[2] & S5_P2_MASK) >> S5_P2_SHIFT;
+ p2[6] = (qfprom_cdata[2] & S6_P2_MASK) >> S6_P2_SHIFT;
+ /* This value is split over two registers, 2 bits and 4 bits */
+ lsb = (qfprom_cdata[2] & S7_P2_MASK_1_0) >> S7_P2_SHIFT_1_0;
+ msb = (qfprom_cdata[3] & S7_P2_MASK_5_2) >> S7_P2_SHIFT_5_2;
+ p2[7] = msb << 2 | lsb;
+ p2[8] = (qfprom_cdata[3] & S8_P2_MASK) >> S8_P2_SHIFT;
+ p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 2);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT;
+ p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+ p1[5] = (qfprom_cdata[2] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (qfprom_cdata[2] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (qfprom_cdata[2] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (qfprom_cdata[3] & S8_P1_MASK) >> S8_P1_SHIFT;
+ p1[9] = (qfprom_cdata[3] & S9_P1_MASK) >> S9_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 2);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
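
Two of the P2 code points above span a fuse-word boundary (2 bits at the top of one word, 4 bits at the bottom of the next); a tiny standalone example of the msb/lsb reconstruction with made-up fuse values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up fuse words: bits [1:0] sit at the top of word0, bits [5:2] at the bottom of word1 */
	uint32_t word0 = 0x80000000;			/* S2_P2 bits 1:0 = 0b10   */
	uint32_t word1 = 0x0000000d;			/* S2_P2 bits 5:2 = 0b1101 */

	uint32_t lsb = (word0 & 0xc0000000) >> 30;	/* 0b10   */
	uint32_t msb = (word1 & 0x0000000f) >> 0;	/* 0b1101 */
	uint32_t p2  = (msb << 2) | lsb;		/* 0b110110 = 54 */

	printf("p2 = %u\n", p2);			/* prints 54 */
	return 0;
}
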
+
+/* v1.x: qcs404,405 */
+
+static const struct tsens_features tsens_v1_feat = {
+ .ver_major = VER_1_X,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+};
+
+static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* VERSION */
+ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
+ [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
+ [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15),
+ /* CTRL_OFFSET */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
+ REG_FIELD_FOR_EACH_SENSOR11(VALID, TM_Sn_STATUS_OFF, 14, 14),
+ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
+ REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
+ REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
+ /* No CRITICAL field on v1.x */
+ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
+
+static const struct tsens_ops ops_generic_v1 = {
+ .init = init_common,
+ .calibrate = calibrate_v1,
+ .get_temp = get_temp_tsens_valid,
+};
+
+const struct tsens_plat_data data_tsens_v1 = {
+ .ops = &ops_generic_v1,
+ .feat = &tsens_v1_feat,
+ .fields = tsens_v1_regfields,
+};
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 381a212872bf..1099069f2aa3 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -4,76 +4,81 @@
* Copyright (c) 2018, Linaro Limited
*/
-#include <linux/regmap.h>
#include <linux/bitops.h>
+#include <linux/regmap.h>
#include "tsens.h"
-#define STATUS_OFFSET 0xa0
-#define LAST_TEMP_MASK 0xfff
-#define STATUS_VALID_BIT BIT(21)
+/* ----- SROT ------ */
+#define SROT_HW_VER_OFF 0x0000
+#define SROT_CTRL_OFF 0x0004
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0004
+#define TM_UPPER_LOWER_INT_STATUS_OFF 0x0008
+#define TM_UPPER_LOWER_INT_CLEAR_OFF 0x000c
+#define TM_UPPER_LOWER_INT_MASK_OFF 0x0010
+#define TM_CRITICAL_INT_STATUS_OFF 0x0014
+#define TM_CRITICAL_INT_CLEAR_OFF 0x0018
+#define TM_CRITICAL_INT_MASK_OFF 0x001c
+#define TM_Sn_UPPER_LOWER_THRESHOLD_OFF 0x0020
+#define TM_Sn_CRITICAL_THRESHOLD_OFF 0x0060
+#define TM_Sn_STATUS_OFF 0x00a0
+#define TM_TRDY_OFF 0x00e4
-static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
-{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int status_reg;
- u32 last_temp = 0, last_temp2 = 0, last_temp3 = 0;
- int ret;
+/* v2.x: 8996, 8998, sdm845 */
- status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- last_temp = code & LAST_TEMP_MASK;
- if (code & STATUS_VALID_BIT)
- goto done;
+static const struct tsens_features tsens_v2_feat = {
+ .ver_major = VER_2_X,
+ .crit_int = 1,
+ .adc = 0,
+ .srot_split = 1,
+ .max_sensors = 16,
+};
- /* Try a second time */
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp2 = code & LAST_TEMP_MASK;
- }
+static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* VERSION */
+ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
+ [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
+ [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15),
+ /* CTRL_OFF */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 18),
- /* Try a third/last time */
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp3 = code & LAST_TEMP_MASK;
- }
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ /* v2 has separate enables for UPPER/LOWER/CRITICAL interrupts */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),
- if (last_temp == last_temp2)
- last_temp = last_temp2;
- else if (last_temp2 == last_temp3)
- last_temp = last_temp3;
-done:
- /* Convert temperature from deciCelsius to milliCelsius */
- *temp = sign_extend32(last_temp, fls(LAST_TEMP_MASK) - 1) * 100;
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11),
+ REG_FIELD_FOR_EACH_SENSOR16(VALID, TM_Sn_STATUS_OFF, 21, 21),
+ REG_FIELD_FOR_EACH_SENSOR16(MIN_STATUS, TM_Sn_STATUS_OFF, 16, 16),
+ REG_FIELD_FOR_EACH_SENSOR16(LOWER_STATUS, TM_Sn_STATUS_OFF, 17, 17),
+ REG_FIELD_FOR_EACH_SENSOR16(UPPER_STATUS, TM_Sn_STATUS_OFF, 18, 18),
+ REG_FIELD_FOR_EACH_SENSOR16(CRITICAL_STATUS, TM_Sn_STATUS_OFF, 19, 19),
+ REG_FIELD_FOR_EACH_SENSOR16(MAX_STATUS, TM_Sn_STATUS_OFF, 20, 20),
- return 0;
-}
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
static const struct tsens_ops ops_generic_v2 = {
.init = init_common,
- .get_temp = get_temp_tsens_v2,
+ .get_temp = get_temp_tsens_valid,
};
-const struct tsens_data data_tsens_v2 = {
- .ops = &ops_generic_v2,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
+const struct tsens_plat_data data_tsens_v2 = {
+ .ops = &ops_generic_v2,
+ .feat = &tsens_v2_feat,
+ .fields = tsens_v2_regfields,
};
/* Kept around for backward compatibility with old msm8996.dtsi */
-const struct tsens_data data_8996 = {
+const struct tsens_plat_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
+ .feat = &tsens_v2_feat,
+ .fields = tsens_v2_regfields,
};
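
Both v1 and v2 now point .get_temp at a shared get_temp_tsens_valid() helper instead of the open-coded three-attempt polling deleted above. That helper lives in tsens-common.c and is not part of this hunk; a hedged sketch of the idea, using only the regmap fields declared in this series (the function name, retry count and delay are assumptions):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include "tsens.h"

/* Sketch: wait for the per-sensor VALID bit, then sign-extend LAST_TEMP. */
static int example_get_temp_valid(struct tsens_priv *priv, int i, int *temp)
{
	u32 hw_id = priv->sensor[i].hw_id;
	u32 valid, code;
	int ret, tries;

	for (tries = 0; tries < 3; tries++) {
		ret = regmap_field_read(priv->rf[VALID_0 + hw_id], &valid);
		if (ret)
			return ret;
		if (valid)
			break;
		usleep_range(500, 1000);	/* assumed settle time */
	}

	ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &code);
	if (ret)
		return ret;

	/* v2 reports deciCelsius in a 12-bit signed field; convert to mC */
	*temp = sign_extend32(code, 11) * 100;
	return 0;
}
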
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index f1ec9bbe4717..36b0b52db524 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -15,38 +15,38 @@
static int tsens_get_temp(void *data, int *temp)
{
const struct tsens_sensor *s = data;
- struct tsens_device *tmdev = s->tmdev;
+ struct tsens_priv *priv = s->priv;
- return tmdev->ops->get_temp(tmdev, s->id, temp);
+ return priv->ops->get_temp(priv, s->id, temp);
}
-static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend)
+static int tsens_get_trend(void *data, int trip, enum thermal_trend *trend)
{
- const struct tsens_sensor *s = p;
- struct tsens_device *tmdev = s->tmdev;
+ const struct tsens_sensor *s = data;
+ struct tsens_priv *priv = s->priv;
- if (tmdev->ops->get_trend)
- return tmdev->ops->get_trend(tmdev, s->id, trend);
+ if (priv->ops->get_trend)
+ return priv->ops->get_trend(priv, s->id, trend);
return -ENOTSUPP;
}
static int __maybe_unused tsens_suspend(struct device *dev)
{
- struct tsens_device *tmdev = dev_get_drvdata(dev);
+ struct tsens_priv *priv = dev_get_drvdata(dev);
- if (tmdev->ops && tmdev->ops->suspend)
- return tmdev->ops->suspend(tmdev);
+ if (priv->ops && priv->ops->suspend)
+ return priv->ops->suspend(priv);
return 0;
}
static int __maybe_unused tsens_resume(struct device *dev)
{
- struct tsens_device *tmdev = dev_get_drvdata(dev);
+ struct tsens_priv *priv = dev_get_drvdata(dev);
- if (tmdev->ops && tmdev->ops->resume)
- return tmdev->ops->resume(tmdev);
+ if (priv->ops && priv->ops->resume)
+ return priv->ops->resume(priv);
return 0;
}
@@ -64,6 +64,9 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,msm8996-tsens",
.data = &data_8996,
}, {
+ .compatible = "qcom,tsens-v1",
+ .data = &data_tsens_v1,
+ }, {
.compatible = "qcom,tsens-v2",
.data = &data_tsens_v2,
},
@@ -76,22 +79,27 @@ static const struct thermal_zone_of_device_ops tsens_of_ops = {
.get_trend = tsens_get_trend,
};
-static int tsens_register(struct tsens_device *tmdev)
+static int tsens_register(struct tsens_priv *priv)
{
int i;
struct thermal_zone_device *tzd;
- for (i = 0; i < tmdev->num_sensors; i++) {
- tmdev->sensor[i].tmdev = tmdev;
- tmdev->sensor[i].id = i;
- tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i,
- &tmdev->sensor[i],
+ for (i = 0; i < priv->num_sensors; i++) {
+ if (!is_sensor_enabled(priv, priv->sensor[i].hw_id)) {
+ dev_err(priv->dev, "sensor %d: disabled\n",
+ priv->sensor[i].hw_id);
+ continue;
+ }
+ priv->sensor[i].priv = priv;
+ priv->sensor[i].id = i;
+ tzd = devm_thermal_zone_of_sensor_register(priv->dev, i,
+ &priv->sensor[i],
&tsens_of_ops);
if (IS_ERR(tzd))
continue;
- tmdev->sensor[i].tzd = tzd;
- if (tmdev->ops->enable)
- tmdev->ops->enable(tmdev, i);
+ priv->sensor[i].tzd = tzd;
+ if (priv->ops->enable)
+ priv->ops->enable(priv, i);
}
return 0;
}
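
tsens_register() now skips sensors that firmware left disabled. is_sensor_enabled(), declared in tsens.h below, is implemented in tsens-common.c and is not visible in this hunk; conceptually it only has to test bit hw_id of the SENSOR_EN bitmap. A hedged sketch:

#include <linux/bitops.h>
#include <linux/regmap.h>
#include "tsens.h"

/* Sketch: the SENSOR_EN field is a bitmap with one bit per hardware sensor. */
static bool example_is_sensor_enabled(struct tsens_priv *priv, u32 hw_id)
{
	u32 val;

	if (regmap_field_read(priv->rf[SENSOR_EN], &val))
		return false;

	return val & BIT(hw_id);
}
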
@@ -101,8 +109,8 @@ static int tsens_probe(struct platform_device *pdev)
int ret, i;
struct device *dev;
struct device_node *np;
- struct tsens_device *tmdev;
- const struct tsens_data *data;
+ struct tsens_priv *priv;
+ const struct tsens_plat_data *data;
const struct of_device_id *id;
u32 num_sensors;
@@ -129,55 +137,55 @@ static int tsens_probe(struct platform_device *pdev)
return -EINVAL;
}
- tmdev = devm_kzalloc(dev,
- struct_size(tmdev, sensor, num_sensors),
+ priv = devm_kzalloc(dev,
+ struct_size(priv, sensor, num_sensors),
GFP_KERNEL);
- if (!tmdev)
+ if (!priv)
return -ENOMEM;
- tmdev->dev = dev;
- tmdev->num_sensors = num_sensors;
- tmdev->ops = data->ops;
- for (i = 0; i < tmdev->num_sensors; i++) {
+ priv->dev = dev;
+ priv->num_sensors = num_sensors;
+ priv->ops = data->ops;
+ for (i = 0; i < priv->num_sensors; i++) {
if (data->hw_ids)
- tmdev->sensor[i].hw_id = data->hw_ids[i];
+ priv->sensor[i].hw_id = data->hw_ids[i];
else
- tmdev->sensor[i].hw_id = i;
- }
- for (i = 0; i < REG_ARRAY_SIZE; i++) {
- tmdev->reg_offsets[i] = data->reg_offsets[i];
+ priv->sensor[i].hw_id = i;
}
+ priv->feat = data->feat;
+ priv->fields = data->fields;
- if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
+ if (!priv->ops || !priv->ops->init || !priv->ops->get_temp)
return -EINVAL;
- ret = tmdev->ops->init(tmdev);
+ ret = priv->ops->init(priv);
if (ret < 0) {
dev_err(dev, "tsens init failed\n");
return ret;
}
- if (tmdev->ops->calibrate) {
- ret = tmdev->ops->calibrate(tmdev);
+ if (priv->ops->calibrate) {
+ ret = priv->ops->calibrate(priv);
if (ret < 0) {
- dev_err(dev, "tsens calibration failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "tsens calibration failed\n");
return ret;
}
}
- ret = tsens_register(tmdev);
+ ret = tsens_register(priv);
- platform_set_drvdata(pdev, tmdev);
+ platform_set_drvdata(pdev, priv);
return ret;
}
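
The probe above sizes the private structure with struct_size() because struct tsens_priv ends in a flexible array of sensors (see tsens.h below). A small illustration of what that allocation covers (struct_size() comes from <linux/overflow.h>; the structure layout here is simplified):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_priv {
	unsigned int num_sensors;
	struct { unsigned int id, hw_id; } sensor[];	/* flexible array member */
};

static struct example_priv *example_alloc(unsigned int n)
{
	/* sizeof(*p) + n * sizeof(p->sensor[0]), with overflow checking */
	struct example_priv *p = kzalloc(struct_size(p, sensor, n), GFP_KERNEL);

	if (p)
		p->num_sensors = n;
	return p;
}
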
static int tsens_remove(struct platform_device *pdev)
{
- struct tsens_device *tmdev = platform_get_drvdata(pdev);
+ struct tsens_priv *priv = platform_get_drvdata(pdev);
- if (tmdev->ops->disable)
- tmdev->ops->disable(tmdev);
+ if (priv->ops->disable)
+ priv->ops->disable(priv);
return 0;
}
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 7b7feee5dc46..eefe3844fb4e 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -9,17 +9,39 @@
#define ONE_PT_CALIB 0x1
#define ONE_PT_CALIB2 0x2
#define TWO_PT_CALIB 0x3
+#define CAL_DEGC_PT1 30
+#define CAL_DEGC_PT2 120
+#define SLOPE_FACTOR 1000
+#define SLOPE_DEFAULT 3200
+
#include <linux/thermal.h>
+#include <linux/regmap.h>
+
+struct tsens_priv;
-struct tsens_device;
+enum tsens_ver {
+ VER_0_1 = 0,
+ VER_1_X,
+ VER_2_X,
+};
+/**
+ * struct tsens_sensor - data for each sensor connected to the tsens device
+ * @priv: tsens device instance that this sensor is connected to
+ * @tzd: pointer to the thermal zone that this sensor is in
+ * @offset: offset of temperature adjustment curve
+ * @id: Sensor ID
+ * @hw_id: HW ID, used when platform-specific sensor IDs differ from @id
+ * @slope: slope of temperature adjustment curve
+ * @status: 8960-specific variable to track 8960 and 8660 status register offset
+ */
struct tsens_sensor {
- struct tsens_device *tmdev;
+ struct tsens_priv *priv;
struct thermal_zone_device *tzd;
int offset;
- int id;
- int hw_id;
+ unsigned int id;
+ unsigned int hw_id;
int slope;
u32 status;
};
@@ -37,63 +59,274 @@ struct tsens_sensor {
*/
struct tsens_ops {
/* mandatory callbacks */
- int (*init)(struct tsens_device *);
- int (*calibrate)(struct tsens_device *);
- int (*get_temp)(struct tsens_device *, int, int *);
+ int (*init)(struct tsens_priv *priv);
+ int (*calibrate)(struct tsens_priv *priv);
+ int (*get_temp)(struct tsens_priv *priv, int i, int *temp);
/* optional callbacks */
- int (*enable)(struct tsens_device *, int);
- void (*disable)(struct tsens_device *);
- int (*suspend)(struct tsens_device *);
- int (*resume)(struct tsens_device *);
- int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
+ int (*enable)(struct tsens_priv *priv, int i);
+ void (*disable)(struct tsens_priv *priv);
+ int (*suspend)(struct tsens_priv *priv);
+ int (*resume)(struct tsens_priv *priv);
+ int (*get_trend)(struct tsens_priv *priv, int i, enum thermal_trend *trend);
};
-enum reg_list {
- SROT_CTRL_OFFSET,
+#define REG_FIELD_FOR_EACH_SENSOR11(_name, _offset, _startbit, _stopbit) \
+ [_name##_##0] = REG_FIELD(_offset, _startbit, _stopbit), \
+ [_name##_##1] = REG_FIELD(_offset + 4, _startbit, _stopbit), \
+ [_name##_##2] = REG_FIELD(_offset + 8, _startbit, _stopbit), \
+ [_name##_##3] = REG_FIELD(_offset + 12, _startbit, _stopbit), \
+ [_name##_##4] = REG_FIELD(_offset + 16, _startbit, _stopbit), \
+ [_name##_##5] = REG_FIELD(_offset + 20, _startbit, _stopbit), \
+ [_name##_##6] = REG_FIELD(_offset + 24, _startbit, _stopbit), \
+ [_name##_##7] = REG_FIELD(_offset + 28, _startbit, _stopbit), \
+ [_name##_##8] = REG_FIELD(_offset + 32, _startbit, _stopbit), \
+ [_name##_##9] = REG_FIELD(_offset + 36, _startbit, _stopbit), \
+ [_name##_##10] = REG_FIELD(_offset + 40, _startbit, _stopbit)
- REG_ARRAY_SIZE,
+#define REG_FIELD_FOR_EACH_SENSOR16(_name, _offset, _startbit, _stopbit) \
+ [_name##_##0] = REG_FIELD(_offset, _startbit, _stopbit), \
+ [_name##_##1] = REG_FIELD(_offset + 4, _startbit, _stopbit), \
+ [_name##_##2] = REG_FIELD(_offset + 8, _startbit, _stopbit), \
+ [_name##_##3] = REG_FIELD(_offset + 12, _startbit, _stopbit), \
+ [_name##_##4] = REG_FIELD(_offset + 16, _startbit, _stopbit), \
+ [_name##_##5] = REG_FIELD(_offset + 20, _startbit, _stopbit), \
+ [_name##_##6] = REG_FIELD(_offset + 24, _startbit, _stopbit), \
+ [_name##_##7] = REG_FIELD(_offset + 28, _startbit, _stopbit), \
+ [_name##_##8] = REG_FIELD(_offset + 32, _startbit, _stopbit), \
+ [_name##_##9] = REG_FIELD(_offset + 36, _startbit, _stopbit), \
+ [_name##_##10] = REG_FIELD(_offset + 40, _startbit, _stopbit), \
+ [_name##_##11] = REG_FIELD(_offset + 44, _startbit, _stopbit), \
+ [_name##_##12] = REG_FIELD(_offset + 48, _startbit, _stopbit), \
+ [_name##_##13] = REG_FIELD(_offset + 52, _startbit, _stopbit), \
+ [_name##_##14] = REG_FIELD(_offset + 56, _startbit, _stopbit), \
+ [_name##_##15] = REG_FIELD(_offset + 60, _startbit, _stopbit)
+
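
For clarity, each REG_FIELD_FOR_EACH_SENSOR11()/SENSOR16() invocation simply stamps out one REG_FIELD() initializer per 4-byte-spaced Sn_STATUS register. Written out by hand, the first entries produced by REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9) would look like the following (TM_Sn_STATUS_OFF is defined in the version-specific source file; the v0.1 value is used here purely for illustration):

#include <linux/regmap.h>
#include "tsens.h"

#define TM_Sn_STATUS_OFF	0x0030	/* v0.1 value, for illustration only */

static const struct reg_field example_expansion[MAX_REGFIELDS] = {
	[LAST_TEMP_0] = REG_FIELD(TM_Sn_STATUS_OFF + 0, 0, 9),	/* sensor 0 status */
	[LAST_TEMP_1] = REG_FIELD(TM_Sn_STATUS_OFF + 4, 0, 9),	/* sensor 1 status */
	/* ...continuing in 4-byte steps up to LAST_TEMP_10 */
};
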
+/* reg_field IDs to use as an index into an array */
+enum regfield_ids {
+ /* ----- SROT ------ */
+ /* HW_VER */
+ VER_MAJOR = 0,
+ VER_MINOR,
+ VER_STEP,
+ /* CTRL_OFFSET */
+ TSENS_EN = 3,
+ TSENS_SW_RST,
+ SENSOR_EN,
+ CODE_OR_TEMP,
+
+ /* ----- TM ------ */
+ /* STATUS */
+ LAST_TEMP_0 = 7, /* Last temperature reading */
+ LAST_TEMP_1,
+ LAST_TEMP_2,
+ LAST_TEMP_3,
+ LAST_TEMP_4,
+ LAST_TEMP_5,
+ LAST_TEMP_6,
+ LAST_TEMP_7,
+ LAST_TEMP_8,
+ LAST_TEMP_9,
+ LAST_TEMP_10,
+ LAST_TEMP_11,
+ LAST_TEMP_12,
+ LAST_TEMP_13,
+ LAST_TEMP_14,
+ LAST_TEMP_15,
+ VALID_0 = 23, /* VALID reading or not */
+ VALID_1,
+ VALID_2,
+ VALID_3,
+ VALID_4,
+ VALID_5,
+ VALID_6,
+ VALID_7,
+ VALID_8,
+ VALID_9,
+ VALID_10,
+ VALID_11,
+ VALID_12,
+ VALID_13,
+ VALID_14,
+ VALID_15,
+ MIN_STATUS_0, /* MIN threshold violated */
+ MIN_STATUS_1,
+ MIN_STATUS_2,
+ MIN_STATUS_3,
+ MIN_STATUS_4,
+ MIN_STATUS_5,
+ MIN_STATUS_6,
+ MIN_STATUS_7,
+ MIN_STATUS_8,
+ MIN_STATUS_9,
+ MIN_STATUS_10,
+ MIN_STATUS_11,
+ MIN_STATUS_12,
+ MIN_STATUS_13,
+ MIN_STATUS_14,
+ MIN_STATUS_15,
+ MAX_STATUS_0, /* MAX threshold violated */
+ MAX_STATUS_1,
+ MAX_STATUS_2,
+ MAX_STATUS_3,
+ MAX_STATUS_4,
+ MAX_STATUS_5,
+ MAX_STATUS_6,
+ MAX_STATUS_7,
+ MAX_STATUS_8,
+ MAX_STATUS_9,
+ MAX_STATUS_10,
+ MAX_STATUS_11,
+ MAX_STATUS_12,
+ MAX_STATUS_13,
+ MAX_STATUS_14,
+ MAX_STATUS_15,
+ LOWER_STATUS_0, /* LOWER threshold violated */
+ LOWER_STATUS_1,
+ LOWER_STATUS_2,
+ LOWER_STATUS_3,
+ LOWER_STATUS_4,
+ LOWER_STATUS_5,
+ LOWER_STATUS_6,
+ LOWER_STATUS_7,
+ LOWER_STATUS_8,
+ LOWER_STATUS_9,
+ LOWER_STATUS_10,
+ LOWER_STATUS_11,
+ LOWER_STATUS_12,
+ LOWER_STATUS_13,
+ LOWER_STATUS_14,
+ LOWER_STATUS_15,
+ UPPER_STATUS_0, /* UPPER threshold violated */
+ UPPER_STATUS_1,
+ UPPER_STATUS_2,
+ UPPER_STATUS_3,
+ UPPER_STATUS_4,
+ UPPER_STATUS_5,
+ UPPER_STATUS_6,
+ UPPER_STATUS_7,
+ UPPER_STATUS_8,
+ UPPER_STATUS_9,
+ UPPER_STATUS_10,
+ UPPER_STATUS_11,
+ UPPER_STATUS_12,
+ UPPER_STATUS_13,
+ UPPER_STATUS_14,
+ UPPER_STATUS_15,
+ CRITICAL_STATUS_0, /* CRITICAL threshold violated */
+ CRITICAL_STATUS_1,
+ CRITICAL_STATUS_2,
+ CRITICAL_STATUS_3,
+ CRITICAL_STATUS_4,
+ CRITICAL_STATUS_5,
+ CRITICAL_STATUS_6,
+ CRITICAL_STATUS_7,
+ CRITICAL_STATUS_8,
+ CRITICAL_STATUS_9,
+ CRITICAL_STATUS_10,
+ CRITICAL_STATUS_11,
+ CRITICAL_STATUS_12,
+ CRITICAL_STATUS_13,
+ CRITICAL_STATUS_14,
+ CRITICAL_STATUS_15,
+ /* TRDY */
+ TRDY,
+ /* INTERRUPT ENABLE */
+ INT_EN, /* Pre-V1, V1.x */
+ LOW_INT_EN, /* V2.x */
+ UP_INT_EN, /* V2.x */
+ CRIT_INT_EN, /* V2.x */
+
+ /* Keep last */
+ MAX_REGFIELDS
};
/**
- * struct tsens_data - tsens instance specific data
- * @num_sensors: Max number of sensors supported by platform
+ * struct tsens_features - Features supported by the IP
+ * @ver_major: Major number of IP version
+ * @crit_int: does the IP support critical interrupts?
+ * @adc: do the sensors only output adc code (instead of temperature)?
+ * @srot_split: does the IP neatly split the register space into SROT and TM,
+ * with SROT only being available to secure boot firmware?
+ * @max_sensors: maximum sensors supported by this version of the IP
+ */
+struct tsens_features {
+ unsigned int ver_major;
+ unsigned int crit_int:1;
+ unsigned int adc:1;
+ unsigned int srot_split:1;
+ unsigned int max_sensors;
+};
+
+/**
+ * struct tsens_plat_data - tsens compile-time platform data
+ * @num_sensors: Number of sensors supported by platform
* @ops: operations the tsens instance supports
* @hw_ids: Subset of sensors ids supported by platform, if not the first n
- * @reg_offsets: Register offsets for commonly used registers
+ * @feat: features of the IP
+ * @fields: bitfield locations
*/
-struct tsens_data {
+struct tsens_plat_data {
const u32 num_sensors;
const struct tsens_ops *ops;
- const u16 reg_offsets[REG_ARRAY_SIZE];
unsigned int *hw_ids;
+ const struct tsens_features *feat;
+ const struct reg_field *fields;
};
-/* Registers to be saved/restored across a context loss */
+/**
+ * struct tsens_context - Registers to be saved/restored across a context loss
+ */
struct tsens_context {
int threshold;
int control;
};
-struct tsens_device {
+/**
+ * struct tsens_priv - private data for each instance of the tsens IP
+ * @dev: pointer to struct device
+ * @num_sensors: number of sensors enabled on this device
+ * @tm_map: pointer to TM register address space
+ * @srot_map: pointer to SROT register address space
+ * @tm_offset: offset to apply for old device trees that don't describe the
+ *             TM and SROT address spaces separately
+ * @rf: array of regmap_fields used to store value of the field
+ * @ctx: registers to be saved and restored during suspend/resume
+ * @feat: features of the IP
+ * @fields: bitfield locations
+ * @ops: pointer to list of callbacks supported by this device
+ * @sensor: list of sensors attached to this device
+ */
+struct tsens_priv {
struct device *dev;
u32 num_sensors;
struct regmap *tm_map;
struct regmap *srot_map;
u32 tm_offset;
- u16 reg_offsets[REG_ARRAY_SIZE];
+ struct regmap_field *rf[MAX_REGFIELDS];
struct tsens_context ctx;
+ const struct tsens_features *feat;
+ const struct reg_field *fields;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
};
-char *qfprom_read(struct device *, const char *);
-void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
-int init_common(struct tsens_device *);
-int get_temp_common(struct tsens_device *, int, int *);
+char *qfprom_read(struct device *dev, const char *cname);
+void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
+int init_common(struct tsens_priv *priv);
+int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp);
+int get_temp_common(struct tsens_priv *priv, int i, int *temp);
+bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id);
+
+/* TSENS target */
+extern const struct tsens_plat_data data_8960;
+
+/* TSENS v0.1 targets */
+extern const struct tsens_plat_data data_8916, data_8974;
/* TSENS v1 targets */
-extern const struct tsens_data data_8916, data_8974, data_8960;
+extern const struct tsens_plat_data data_tsens_v1;
+
/* TSENS v2 targets */
-extern const struct tsens_data data_8996, data_tsens_v2;
+extern const struct tsens_plat_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 3b5f5b3fb1bc..7b364933bfb1 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -193,11 +193,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
- if (!np) {
- dev_err(&pdev->dev, "Device OF-Node is NULL");
- return -ENODEV;
- }
-
data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
GFP_KERNEL);
if (!data)
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 88fa41cf16e8..83f306265ee1 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/thermal.h>
@@ -82,7 +81,6 @@ struct rcar_gen3_thermal_tsc {
struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
unsigned int num_tscs;
- spinlock_t lock; /* Protect interrupts on and off */
void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
};
@@ -232,38 +230,16 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
{
struct rcar_gen3_thermal_priv *priv = data;
u32 status;
- int i, ret = IRQ_HANDLED;
+ int i;
- spin_lock(&priv->lock);
for (i = 0; i < priv->num_tscs; i++) {
status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
if (status)
- ret = IRQ_WAKE_THREAD;
+ thermal_zone_device_update(priv->tscs[i]->zone,
+ THERMAL_EVENT_UNSPECIFIED);
}
- if (ret == IRQ_WAKE_THREAD)
- rcar_thermal_irq_set(priv, false);
-
- spin_unlock(&priv->lock);
-
- return ret;
-}
-
-static irqreturn_t rcar_gen3_thermal_irq_thread(int irq, void *data)
-{
- struct rcar_gen3_thermal_priv *priv = data;
- unsigned long flags;
- int i;
-
- for (i = 0; i < priv->num_tscs; i++)
- thermal_zone_device_update(priv->tscs[i]->zone,
- THERMAL_EVENT_UNSPECIFIED);
-
- spin_lock_irqsave(&priv->lock, flags);
- rcar_thermal_irq_set(priv, true);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return IRQ_HANDLED;
}
@@ -307,7 +283,7 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
- rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN, IRQ_TEMPD1 | IRQ_TEMP2);
@@ -331,6 +307,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
static int rcar_gen3_thermal_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev);
+
+ rcar_thermal_irq_set(priv, false);
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -371,8 +350,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (soc_device_match(r8a7795es1))
priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
- spin_lock_init(&priv->lock);
-
platform_set_drvdata(pdev, priv);
/*
@@ -390,9 +367,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!irqname)
return -ENOMEM;
- ret = devm_request_threaded_irq(dev, irq, rcar_gen3_thermal_irq,
- rcar_gen3_thermal_irq_thread,
- IRQF_SHARED, irqname, priv);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rcar_gen3_thermal_irq,
+ IRQF_ONESHOT, irqname, priv);
if (ret)
return ret;
}
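
The switch above to a NULL hard handler plus IRQF_ONESHOT lets the interrupt core keep the line masked until the threaded handler returns, which is what allows the hand-rolled spinlock and mask/unmask dance to be deleted. For reference, the general shape of such a request (names and values here are placeholders, not the driver's):

#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical: run all handling in a thread; the core masks the line
 * (IRQF_ONESHOT) until example_thread_fn() completes.
 */
static irqreturn_t example_thread_fn(int irq, void *data)
{
	/* ...acknowledge the device and update thermal zones here... */
	return IRQ_HANDLED;
}

static int example_request(struct device *dev, int irq, void *data)
{
	return devm_request_threaded_irq(dev, irq, NULL, example_thread_fn,
					 IRQF_ONESHOT, "example-thermal", data);
}
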
@@ -433,10 +410,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
- ret = of_thermal_get_ntrips(tsc->zone);
- if (ret < 0)
- goto error_unregister;
-
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -448,6 +421,10 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
goto error_unregister;
}
+ ret = of_thermal_get_ntrips(tsc->zone);
+ if (ret < 0)
+ goto error_unregister;
+
dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 97462e9b40d8..d0873de718da 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -52,6 +52,7 @@ struct rcar_thermal_chip {
unsigned int irq_per_ch : 1;
unsigned int needs_suspend_resume : 1;
unsigned int nirqs;
+ unsigned int ctemp_bands;
};
static const struct rcar_thermal_chip rcar_thermal = {
@@ -60,6 +61,7 @@ static const struct rcar_thermal_chip rcar_thermal = {
.irq_per_ch = 0,
.needs_suspend_resume = 0,
.nirqs = 1,
+ .ctemp_bands = 1,
};
static const struct rcar_thermal_chip rcar_gen2_thermal = {
@@ -68,6 +70,7 @@ static const struct rcar_thermal_chip rcar_gen2_thermal = {
.irq_per_ch = 0,
.needs_suspend_resume = 0,
.nirqs = 1,
+ .ctemp_bands = 1,
};
static const struct rcar_thermal_chip rcar_gen3_thermal = {
@@ -80,6 +83,7 @@ static const struct rcar_thermal_chip rcar_gen3_thermal = {
* interrupts to detect a temperature change, rise or fall.
*/
.nirqs = 2,
+ .ctemp_bands = 2,
};
struct rcar_thermal_priv {
@@ -263,7 +267,12 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
return ret;
mutex_lock(&priv->lock);
- tmp = MCELSIUS((priv->ctemp * 5) - 65);
+ if (priv->chip->ctemp_bands == 1)
+ tmp = MCELSIUS((priv->ctemp * 5) - 65);
+ else if (priv->ctemp < 24)
+ tmp = MCELSIUS(((priv->ctemp * 55) - 720) / 10);
+ else
+ tmp = MCELSIUS((priv->ctemp * 5) - 60);
mutex_unlock(&priv->lock);
if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
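
The Gen3 band split above can be sanity-checked by hand: with ctemp_bands == 2, a raw ctemp of 20 falls in the low band and maps to ((20 * 55) - 720) / 10 = 38 degrees C, while ctemp 24 uses the upper formula, (24 * 5) - 60 = 60 degrees C. A standalone check of the same arithmetic:

#include <stdio.h>

/* mirrors the Gen3 two-band conversion above, returning millidegrees C */
static int example_ctemp_to_mc(unsigned int ctemp)
{
	if (ctemp < 24)
		return ((int)ctemp * 55 - 720) / 10 * 1000;
	return ((int)ctemp * 5 - 60) * 1000;
}

int main(void)
{
	printf("%d %d\n", example_ctemp_to_mc(20), example_ctemp_to_mc(24));
	/* prints: 38000 60000 */
	return 0;
}
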
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9c7643d62ed7..bda1ca199abd 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -172,6 +172,9 @@ struct rockchip_thermal_data {
int tshut_temp;
enum tshut_mode tshut_mode;
enum tshut_polarity tshut_polarity;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state;
+ struct pinctrl_state *otp_state;
};
/**
@@ -222,11 +225,15 @@ struct rockchip_thermal_data {
#define GRF_TSADC_TESTBIT_L 0x0e648
#define GRF_TSADC_TESTBIT_H 0x0e64c
+#define PX30_GRF_SOC_CON2 0x0408
+
#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
#define GRF_TSADC_VCM_EN_L (0x10001 << 7)
#define GRF_TSADC_VCM_EN_H (0x10001 << 7)
+#define GRF_CON_TSADC_CH_INV (0x10001 << 1)
+
/**
* struct tsadc_table - code to temperature conversion table
* @code: the value of adc channel
@@ -689,6 +696,13 @@ static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
regs + TSADCV2_AUTO_CON);
}
+static void rk_tsadcv4_initialize(struct regmap *grf, void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ rk_tsadcv2_initialize(grf, regs, tshut_polarity);
+ regmap_write(grf, PX30_GRF_SOC_CON2, GRF_CON_TSADC_CH_INV);
+}
+
static void rk_tsadcv2_irq_ack(void __iomem *regs)
{
u32 val;
@@ -818,6 +832,30 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val, regs + TSADCV2_INT_EN);
}
+static const struct rockchip_tsadc_chip px30_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+ .chn_num = 2, /* 2 channels for tsadc */
+
+ .tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv4_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rk3328_code_table,
+ .length = ARRAY_SIZE(rk3328_code_table),
+ .data_mask = TSADCV2_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_num = 1, /* one channel for tsadc */
@@ -990,6 +1028,9 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
};
static const struct of_device_id of_rockchip_thermal_match[] = {
+ { .compatible = "rockchip,px30-tsadc",
+ .data = (void *)&px30_tsadc_data,
+ },
{
.compatible = "rockchip,rv1108-tsadc",
.data = (void *)&rv1108_tsadc_data,
@@ -1242,6 +1283,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
return error;
}
+ thermal->chip->control(thermal->regs, false);
+
error = clk_prepare_enable(thermal->clk);
if (error) {
dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
@@ -1267,6 +1310,30 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->initialize(thermal->grf, thermal->regs,
thermal->tshut_polarity);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO) {
+ thermal->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(thermal->pinctrl)) {
+ dev_err(&pdev->dev, "failed to find thermal pinctrl\n");
+ return PTR_ERR(thermal->pinctrl);
+ }
+
+ thermal->gpio_state = pinctrl_lookup_state(thermal->pinctrl,
+ "gpio");
+ if (IS_ERR_OR_NULL(thermal->gpio_state)) {
+ dev_err(&pdev->dev, "failed to find thermal gpio state\n");
+ return -EINVAL;
+ }
+
+ thermal->otp_state = pinctrl_lookup_state(thermal->pinctrl,
+ "otpout");
+ if (IS_ERR_OR_NULL(thermal->otp_state)) {
+ dev_err(&pdev->dev, "failed to find thermal otpout state\n");
+ return -EINVAL;
+ }
+
+ pinctrl_select_state(thermal->pinctrl, thermal->otp_state);
+ }
+
for (i = 0; i < thermal->chip->chn_num; i++) {
error = rockchip_thermal_register_sensor(pdev, thermal,
&thermal->sensors[i],
@@ -1337,8 +1404,8 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
clk_disable(thermal->pclk);
clk_disable(thermal->clk);
-
- pinctrl_pm_select_sleep_state(dev);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO)
+ pinctrl_select_state(thermal->pinctrl, thermal->gpio_state);
return 0;
}
@@ -1383,7 +1450,8 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
for (i = 0; i < thermal->chip->chn_num; i++)
rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
- pinctrl_pm_select_default_state(dev);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO)
+ pinctrl_select_state(thermal->pinctrl, thermal->otp_state);
return 0;
}
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index b80f9a9e4f8f..d8b1a4586d0b 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -3,9 +3,9 @@
#
config ST_THERMAL
- tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
- help
- Support for thermal sensors on STMicroelectronics STi series of SoCs.
+ tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
+ help
+ Support for thermal sensors on STMicroelectronics STi series of SoCs.
config ST_THERMAL_SYSCFG
select ST_THERMAL
@@ -16,11 +16,11 @@ config ST_THERMAL_MEMMAP
tristate "STi series memory mapped access based thermal sensors"
config STM32_THERMAL
- tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
- depends on MACH_STM32MP157
- default y
- help
- Support for thermal framework on STMicroelectronics STM32 series of
- SoCs. This thermal driver allows to access to general thermal framework
- functionalities and to acces to SoC sensor functionalities. This
- configuration is fully dependent of MACH_STM32MP157.
+ tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
+ depends on MACH_STM32MP157
+ default y
+ help
+	  Support for the thermal framework on STMicroelectronics STM32 series
+	  of SoCs. This thermal driver allows access to general thermal
+	  framework functionalities and to SoC sensor functionalities. This
+	  configuration is fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index bbd73c5a4a4e..cf9ddc52f30e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -570,8 +570,7 @@ thermal_unprepare:
static int stm_thermal_suspend(struct device *dev)
{
int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+ struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_sensor_off(sensor);
if (ret)
@@ -585,8 +584,7 @@ static int stm_thermal_suspend(struct device *dev)
static int stm_thermal_resume(struct device *dev)
{
int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+ struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_prepare(sensor);
if (ret)
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index f8740f7852e3..fc0b33b3f26b 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -14,7 +14,7 @@ config TEGRA_BPMP_THERMAL
tristate "Tegra BPMP thermal sensing"
depends on TEGRA_BPMP || COMPILE_TEST
help
- Enable this option for support for sensing system temperature of NVIDIA
- Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+	  Enable this option to support sensing the system temperature of
+	  NVIDIA Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
endmenu
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 70043a28eb7a..fcf70a3728b6 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Mikko Perttunen <mperttunen@nvidia.com>
@@ -22,6 +23,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -85,12 +88,51 @@
#define THERMCTL_LVL0_UP_STATS 0x10
#define THERMCTL_LVL0_DN_STATS 0x14
+#define THERMCTL_INTR_STATUS 0x84
+
+#define TH_INTR_MD0_MASK BIT(25)
+#define TH_INTR_MU0_MASK BIT(24)
+#define TH_INTR_GD0_MASK BIT(17)
+#define TH_INTR_GU0_MASK BIT(16)
+#define TH_INTR_CD0_MASK BIT(9)
+#define TH_INTR_CU0_MASK BIT(8)
+#define TH_INTR_PD0_MASK BIT(1)
+#define TH_INTR_PU0_MASK BIT(0)
+#define TH_INTR_IGNORE_MASK 0xFCFCFCFC
+
#define THERMCTL_STATS_CTL 0x94
#define STATS_CTL_CLR_DN 0x8
#define STATS_CTL_EN_DN 0x4
#define STATS_CTL_CLR_UP 0x2
#define STATS_CTL_EN_UP 0x1
+#define OC1_CFG 0x310
+#define OC1_CFG_LONG_LATENCY_MASK BIT(6)
+#define OC1_CFG_HW_RESTORE_MASK BIT(5)
+#define OC1_CFG_PWR_GOOD_MASK_MASK BIT(4)
+#define OC1_CFG_THROTTLE_MODE_MASK (0x3 << 2)
+#define OC1_CFG_ALARM_POLARITY_MASK BIT(1)
+#define OC1_CFG_EN_THROTTLE_MASK BIT(0)
+
+#define OC1_CNT_THRESHOLD 0x314
+#define OC1_THROTTLE_PERIOD 0x318
+#define OC1_ALARM_COUNT 0x31c
+#define OC1_FILTER 0x320
+#define OC1_STATS 0x3a8
+
+#define OC_INTR_STATUS 0x39c
+#define OC_INTR_ENABLE 0x3a0
+#define OC_INTR_DISABLE 0x3a4
+#define OC_STATS_CTL 0x3c4
+#define OC_STATS_CTL_CLR_ALL 0x2
+#define OC_STATS_CTL_EN_ALL 0x1
+
+#define OC_INTR_OC1_MASK BIT(0)
+#define OC_INTR_OC2_MASK BIT(1)
+#define OC_INTR_OC3_MASK BIT(2)
+#define OC_INTR_OC4_MASK BIT(3)
+#define OC_INTR_OC5_MASK BIT(4)
+
#define THROT_GLOBAL_CFG 0x400
#define THROT_GLOBAL_ENB_MASK BIT(0)
@@ -160,6 +202,15 @@
/* get dividend from the depth */
#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
+/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
+ * level vector
+ * NONE 3'b000
+ * LOW 3'b001
+ * MED 3'b011
+ * HIGH 3'b111
+ */
+#define THROT_LEVEL_TO_DEPTH(level) ((0x1 << (level)) - 1)
+
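
Working the macro by hand confirms the table in the comment above: THROT_LEVEL_TO_DEPTH(0) = 0b000 (NONE), (1) = 0b001 (LOW), (2) = 0b011 (MED) and (3) = 0b111 (HIGH). A one-line standalone check:

#include <stdio.h>

#define THROT_LEVEL_TO_DEPTH(level)	((0x1 << (level)) - 1)

int main(void)
{
	/* prints 0 1 3 7, matching NONE/LOW/MED/HIGH */
	printf("%d %d %d %d\n", THROT_LEVEL_TO_DEPTH(0), THROT_LEVEL_TO_DEPTH(1),
	       THROT_LEVEL_TO_DEPTH(2), THROT_LEVEL_TO_DEPTH(3));
	return 0;
}
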
/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
#define THROT_OFFSET 0x30
#define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \
@@ -173,6 +224,25 @@
#define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \
(THROT_OFFSET * throt))
+#define ALARM_OFFSET 0x14
+#define ALARM_CFG(throt) (OC1_CFG + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_CNT_THRESHOLD(throt) (OC1_CNT_THRESHOLD + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_THROTTLE_PERIOD(throt) (OC1_THROTTLE_PERIOD + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_ALARM_COUNT(throt) (OC1_ALARM_COUNT + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_FILTER(throt) (OC1_FILTER + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_STATS(throt) (OC1_STATS + \
+ (4 * (throt - THROTTLE_OC1)))
+
/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
#define CCROC_THROT_OFFSET 0x0c
#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \
@@ -184,15 +254,32 @@
#define THERMCTL_LVL_REGS_SIZE 0x20
#define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
+#define OC_THROTTLE_MODE_DISABLED 0
+#define OC_THROTTLE_MODE_BRIEF 2
+
static const int min_low_temp = -127000;
static const int max_high_temp = 127000;
enum soctherm_throttle_id {
THROTTLE_LIGHT = 0,
THROTTLE_HEAVY,
+ THROTTLE_OC1,
+ THROTTLE_OC2,
+ THROTTLE_OC3,
+ THROTTLE_OC4,
+ THROTTLE_OC5, /* OC5 is reserved */
THROTTLE_SIZE,
};
+enum soctherm_oc_irq_id {
+ TEGRA_SOC_OC_IRQ_1,
+ TEGRA_SOC_OC_IRQ_2,
+ TEGRA_SOC_OC_IRQ_3,
+ TEGRA_SOC_OC_IRQ_4,
+ TEGRA_SOC_OC_IRQ_5,
+ TEGRA_SOC_OC_IRQ_MAX,
+};
+
enum soctherm_throttle_dev_id {
THROTTLE_DEV_CPU = 0,
THROTTLE_DEV_GPU,
@@ -202,6 +289,11 @@ enum soctherm_throttle_dev_id {
static const char *const throt_names[] = {
[THROTTLE_LIGHT] = "light",
[THROTTLE_HEAVY] = "heavy",
+ [THROTTLE_OC1] = "oc1",
+ [THROTTLE_OC2] = "oc2",
+ [THROTTLE_OC3] = "oc3",
+ [THROTTLE_OC4] = "oc4",
+ [THROTTLE_OC5] = "oc5",
};
struct tegra_soctherm;
@@ -213,12 +305,23 @@ struct tegra_thermctl_zone {
const struct tegra_tsensor_group *sg;
};
+struct soctherm_oc_cfg {
+ u32 active_low;
+ u32 throt_period;
+ u32 alarm_cnt_thresh;
+ u32 alarm_filter;
+ u32 mode;
+ bool intr_en;
+};
+
struct soctherm_throt_cfg {
const char *name;
unsigned int id;
u8 priority;
u8 cpu_throt_level;
u32 cpu_throt_depth;
+ u32 gpu_throt_level;
+ struct soctherm_oc_cfg oc_cfg;
struct thermal_cooling_device *cdev;
bool init;
};
@@ -231,6 +334,9 @@ struct tegra_soctherm {
void __iomem *clk_regs;
void __iomem *ccroc_regs;
+ int thermal_irq;
+ int edp_irq;
+
u32 *calib;
struct thermal_zone_device **thermctl_tzs;
struct tegra_soctherm_soc *soc;
@@ -238,8 +344,19 @@ struct tegra_soctherm {
struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
struct dentry *debugfs_dir;
+
+ struct mutex thermctl_lock;
};
+struct soctherm_oc_irq_chip_data {
+ struct mutex irq_lock; /* serialize OC IRQs */
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+ int irq_enable;
+};
+
+static struct soctherm_oc_irq_chip_data soc_irq_cdata;
+
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
@@ -446,6 +563,24 @@ find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
return NULL;
}
+static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
+{
+ int i, temp = min_low_temp;
+ struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
+
+ if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
+ return temp;
+
+ if (tt) {
+ for (i = 0; i < ts->soc->num_ttgs; i++) {
+ if (tt[i].id == id)
+ return tt[i].temp;
+ }
+ }
+
+ return temp;
+}
+
static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
{
struct tegra_thermctl_zone *zone = data;
@@ -464,7 +599,16 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
return ret;
if (type == THERMAL_TRIP_CRITICAL) {
- return thermtrip_program(dev, sg, temp);
+ /*
+		 * If the thermtrips property is set in DT, the critical type
+		 * trip does not need to be programmed to HW; if it is not
+		 * set, program the critical trip to HW.
+ */
+ if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
+ return thermtrip_program(dev, sg, temp);
+ else
+ return 0;
+
} else if (type == THERMAL_TRIP_HOT) {
int i;
@@ -519,10 +663,60 @@ static int tegra_thermctl_get_trend(void *data, int trip,
return 0;
}
+static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
+{
+ u32 r;
+
+ /* multiple zones could be handling and setting trips at once */
+ mutex_lock(&zn->ts->thermctl_lock);
+ r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
+ r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
+ writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
+ mutex_unlock(&zn->ts->thermctl_lock);
+}
+
+static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
+{
+ u32 r;
+
+ /* multiple zones could be handling and setting trips at once */
+ mutex_lock(&zn->ts->thermctl_lock);
+ r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
+ r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
+ writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
+ mutex_unlock(&zn->ts->thermctl_lock);
+}
+
+static int tegra_thermctl_set_trips(void *data, int lo, int hi)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 r;
+
+ thermal_irq_disable(zone);
+
+ r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
+ writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+
+ lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
+ hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
+ dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
+
+ r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
+ r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
+ writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+
+ thermal_irq_enable(zone);
+
+ return 0;
+}
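
With .set_trips wired into tegra_of_thermal_ops just below, the thermal core calls it whenever the zone temperature leaves the currently programmed window, passing the next lower and upper thresholds in millidegrees; the function above then reprograms the LVL0 thresholds and rearms the up/down interrupts. A hedged skeleton of that contract (illustrative only, not the driver's code):

/* The core passes a window in millidegrees C; the driver arms both edges. */
static int example_set_trips(void *data, int low_mc, int high_mc)
{
	/*
	 * 1. mask this sensor's up/down interrupts
	 * 2. clamp low_mc/high_mc to the supported range
	 * 3. program the dn/up thresholds (scaled by thresh_grain)
	 * 4. unmask the interrupts
	 */
	return 0;
}
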
+
static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
.get_trend = tegra_thermctl_get_trend,
+ .set_trips = tegra_thermctl_set_trips,
};
static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
@@ -555,7 +749,8 @@ static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
* @dev: struct device * of the SOC_THERM instance
*
* Configure the SOC_THERM HW trip points, setting "THERMTRIP"
- * "THROTTLE" trip points , using "critical" or "hot" type trip_temp
+ * "THROTTLE" trip points, using "thermtrips", "critical" or "hot"
+ * type trip_temp
* from thermal zone.
* After they have been configured, THERMTRIP or THROTTLE will take
* action when the configured SoC thermal sensor group reaches a
@@ -577,28 +772,23 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct soctherm_throt_cfg *stc;
- int i, trip, temperature;
- int ret;
+ int i, trip, temperature, ret;
- ret = tz->ops->get_crit_temp(tz, &temperature);
- if (ret) {
- dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
- sg->name);
- goto set_throttle;
- }
+ /* Get thermtrips. If missing, try to get critical trips. */
+ temperature = tsensor_group_thermtrip_get(ts, sg->id);
+ if (min_low_temp == temperature)
+ if (tz->ops->get_crit_temp(tz, &temperature))
+ temperature = max_high_temp;
ret = thermtrip_program(dev, sg, temperature);
if (ret) {
- dev_err(dev, "thermtrip: %s: error during enable\n",
- sg->name);
+ dev_err(dev, "thermtrip: %s: error during enable\n", sg->name);
return ret;
}
- dev_info(dev,
- "thermtrip: will shut down when %s reaches %d mC\n",
+ dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
sg->name, temperature);
-set_throttle:
ret = get_hot_temp(tz, &trip, &temperature);
if (ret) {
dev_info(dev, "throttrip: %s: missing hot temperature\n",
@@ -606,7 +796,7 @@ set_throttle:
return 0;
}
- for (i = 0; i < THROTTLE_SIZE; i++) {
+ for (i = 0; i < THROTTLE_OC1; i++) {
struct thermal_cooling_device *cdev;
if (!ts->throt_cfgs[i].init)
@@ -638,6 +828,461 @@ set_throttle:
return 0;
}
+static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id)
+{
+ struct tegra_soctherm *ts = dev_id;
+ u32 r;
+
+ /* Case for no lock:
+ * Although interrupts are enabled in set_trips, there is still no need
+ * to lock here because the interrupts are disabled before programming
+	 * new trip points. Hence there can't be an interrupt on the same sensor.
+	 * An interrupt can however occur on a sensor while trips are being
+	 * programmed on a different one. This being a LEVEL interrupt, it won't
+	 * cause a new interrupt, but that case is taken care of by re-reading
+	 * the STATUS register in the thread function.
+ */
+ r = readl(ts->regs + THERMCTL_INTR_STATUS);
+ writel(r, ts->regs + THERMCTL_INTR_DISABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * soctherm_thermal_isr_thread() - Handles a thermal interrupt request
+ * @irq: The interrupt number being requested; not used
+ * @dev_id: Opaque pointer to tegra_soctherm;
+ *
+ * Clears the interrupt status register if there are expected
+ * interrupt bits set.
+ * The interrupt(s) are then handled by updating the corresponding
+ * thermal zones.
+ *
+ * An error is logged if any unexpected interrupt bits are set.
+ *
+ * Disabled interrupts are re-enabled.
+ *
+ * Return: %IRQ_HANDLED. Interrupt was handled and no further processing
+ * is needed.
+ */
+static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
+{
+ struct tegra_soctherm *ts = dev_id;
+ struct thermal_zone_device *tz;
+ u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;
+
+ st = readl(ts->regs + THERMCTL_INTR_STATUS);
+
+ /* deliberately clear expected interrupts handled in SW */
+ cp |= st & TH_INTR_CD0_MASK;
+ cp |= st & TH_INTR_CU0_MASK;
+
+ gp |= st & TH_INTR_GD0_MASK;
+ gp |= st & TH_INTR_GU0_MASK;
+
+ pl |= st & TH_INTR_PD0_MASK;
+ pl |= st & TH_INTR_PU0_MASK;
+
+ me |= st & TH_INTR_MD0_MASK;
+ me |= st & TH_INTR_MU0_MASK;
+
+ ex |= cp | gp | pl | me;
+ if (ex) {
+ writel(ex, ts->regs + THERMCTL_INTR_STATUS);
+ st &= ~ex;
+
+ if (cp) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (gp) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (pl) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (me) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+ }
+
+ /* deliberately ignore expected interrupts NOT handled in SW */
+ ex |= TH_INTR_IGNORE_MASK;
+ st &= ~ex;
+
+ if (st) {
+ /* Whine about any other unexpected INTR bits still set */
+ pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
+ writel(st, ts->regs + THERMCTL_INTR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
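
The hard ISR above only latches and masks the pending bits; the heavy lifting happens in the thread. The pair is presumably wired up later in probe with a threaded IRQ request; a hedged sketch of that wiring (names and flags are placeholders, the actual probe code is outside this hunk):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical wiring: the hard handler defers to the thread via IRQ_WAKE_THREAD. */
static int example_wire_thermal_irq(struct platform_device *pdev,
				    struct tegra_soctherm *ts, int irq)
{
	return devm_request_threaded_irq(&pdev->dev, irq,
					 soctherm_thermal_isr,
					 soctherm_thermal_isr_thread,
					 0, dev_name(&pdev->dev), ts);
}
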
+
+/**
+ * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
+ * @ts:		pointer to the tegra_soctherm instance
+ * @alarm:		The soctherm throttle id
+ * @enable:		Flag indicating whether to enable or disable the
+ *			soctherm over-current interrupt
+ *
+ * Enables the interrupt for the over-current pin behind @alarm if the flag
+ * is set and the alarm corresponds to OC1, OC2, OC3, or OC4.
+ */
+static void soctherm_oc_intr_enable(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id alarm,
+ bool enable)
+{
+ u32 r;
+
+ if (!enable)
+ return;
+
+ r = readl(ts->regs + OC_INTR_ENABLE);
+ switch (alarm) {
+ case THROTTLE_OC1:
+ r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1);
+ break;
+ case THROTTLE_OC2:
+ r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1);
+ break;
+ case THROTTLE_OC3:
+ r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1);
+ break;
+ case THROTTLE_OC4:
+ r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1);
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ writel(r, ts->regs + OC_INTR_ENABLE);
+}
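REG_SET_MASK used above is a read-modify-write helper defined in soctherm.h, which this hunk does not show. As a rough sketch of what such a helper typically does (an assumption, not the driver's exact macro):

#include <linux/bitops.h>
#include <linux/types.h>

/* Clear the field selected by mask, then place val into that field. */
static inline u32 reg_set_mask(u32 reg, u32 mask, u32 val)
{
	return (reg & ~mask) | ((val << (ffs(mask) - 1)) & mask);
}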
+
+/**
+ * soctherm_handle_alarm() - Handles soctherm alarms
+ * @alarm: The soctherm throttle id
+ *
+ * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing
+ * a warning or informative message.
+ *
+ * Return: -EINVAL for @alarm = THROTTLE_OC3, otherwise 0 (success).
+ */
+static int soctherm_handle_alarm(enum soctherm_throttle_id alarm)
+{
+ int rv = -EINVAL;
+
+ switch (alarm) {
+ case THROTTLE_OC1:
+ pr_debug("soctherm: Successfully handled OC1 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC2:
+ pr_debug("soctherm: Successfully handled OC2 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC3:
+ pr_debug("soctherm: Successfully handled OC3 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC4:
+ pr_debug("soctherm: Successfully handled OC4 alarm\n");
+ rv = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ if (rv)
+ pr_err("soctherm: ERROR in handling %s alarm\n",
+ throt_names[alarm]);
+
+ return rv;
+}
+
+/**
+ * soctherm_edp_isr_thread() - log an over-current interrupt request
+ * @irq: OC irq number. Currently not being used. See description
+ * @arg: a void pointer for callback, currently not being used
+ *
+ * Over-current events are handled in hardware. This function is called to
+ * log and handle any OC events that happened. Additionally, it checks the
+ * over-current interrupt status register for bits that are set but were
+ * not expected (i.e. any discrepancy in interrupt status) and logs the
+ * discrepancy.
+ *
+ * Return: %IRQ_HANDLED
+ */
+static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
+{
+ struct tegra_soctherm *ts = arg;
+ u32 st, ex, oc1, oc2, oc3, oc4;
+
+ st = readl(ts->regs + OC_INTR_STATUS);
+
+ /* deliberately clear expected interrupts handled in SW */
+ oc1 = st & OC_INTR_OC1_MASK;
+ oc2 = st & OC_INTR_OC2_MASK;
+ oc3 = st & OC_INTR_OC3_MASK;
+ oc4 = st & OC_INTR_OC4_MASK;
+ ex = oc1 | oc2 | oc3 | oc4;
+
+ pr_err("soctherm: OC ALARM 0x%08x\n", ex);
+ if (ex) {
+ writel(st, ts->regs + OC_INTR_STATUS);
+ st &= ~ex;
+
+ if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);
+
+ if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);
+
+ if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);
+
+ if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);
+
+ if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 0));
+
+ if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 1));
+
+ if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 2));
+
+ if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 3));
+ }
+
+ if (st) {
+ pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st);
+ writel(st, ts->regs + OC_INTR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
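Forwarding an OC event to a registered consumer, as done above for OC1..OC4, is just a lookup in the IRQ domain followed by a nested dispatch. A minimal sketch with placeholder names:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Dispatch one hardware OC event to its nested consumer, if mapped. */
static void forward_oc_event(struct irq_domain *domain, irq_hw_number_t hwirq)
{
	unsigned int virq = irq_find_mapping(domain, hwirq);

	if (virq)
		/* Runs the consumer's threaded handler in process context. */
		handle_nested_irq(virq);
}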
+
+/**
+ * soctherm_edp_isr() - Disables any active interrupts
+ * @irq: The interrupt request number
+ * @arg: Opaque pointer to an argument
+ *
+ * Writes the over-current interrupt status to the OC_INTR_DISABLE register,
+ * masking any asserted interrupts. Doing this prevents the same interrupts
+ * from triggering this isr repeatedly. The thread woken by this isr will
+ * handle asserted interrupts and subsequently unmask/re-enable them.
+ *
+ * The OC_INTR_DISABLE register indicates which OC interrupts
+ * have been disabled.
+ *
+ * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread
+ */
+static irqreturn_t soctherm_edp_isr(int irq, void *arg)
+{
+ struct tegra_soctherm *ts = arg;
+ u32 r;
+
+ if (!ts)
+ return IRQ_NONE;
+
+ r = readl(ts->regs + OC_INTR_STATUS);
+ writel(r, ts->regs + OC_INTR_DISABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * soctherm_oc_irq_lock() - locks the over-current interrupt request
+ * @data: Interrupt request data
+ *
+ * Looks up the chip data from @data and locks the mutex associated with
+ * a particular over-current interrupt request.
+ */
+static void soctherm_oc_irq_lock(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->irq_lock);
+}
+
+/**
+ * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request
+ * @data: Interrupt request data
+ *
+ * Looks up the interrupt request data @data and unlocks the mutex associated
+ * with a particular over-current interrupt request.
+ */
+static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_unlock(&d->irq_lock);
+}
+
+/**
+ * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt
+ * @data: irq_data structure of the chip
+ *
+ * Sets the corresponding bit in the driver's irq_enable mask, allowing
+ * SOC_THERM to respond to over-current interrupts.
+ */
+static void soctherm_oc_irq_enable(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ d->irq_enable |= BIT(data->hwirq);
+}
+
+/**
+ * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
+ * @data: The interrupt request information
+ *
+ * Clears the interrupt request enable bit of the overcurrent
+ * interrupt request chip data.
+ */
+static void soctherm_oc_irq_disable(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ d->irq_enable &= ~BIT(data->hwirq);
+}
+
+static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ return 0;
+}
+
+/**
+ * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
+ * @h: Interrupt request domain
+ * @virq: Virtual interrupt request number
+ * @hw: Hardware interrupt request number
+ *
+ * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
+ * interrupt is requested, the irq_domain takes the request's virtual
+ * request number (much like a virtual memory address) and maps it to a
+ * physical hardware request number.
+ *
+ * When a mapping doesn't already exist for a virtual request number, the
+ * irq_domain calls this function to associate the virtual request number with
+ * a hardware request number.
+ *
+ * Return: 0
+ */
+static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct soctherm_oc_irq_chip_data *data = h->host_data;
+
+ irq_set_chip_data(virq, data);
+ irq_set_chip(virq, &data->irq_chip);
+ irq_set_nested_thread(virq, 1);
+ return 0;
+}
+
+/**
+ * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
+ * @d: Interrupt request domain
+ * @ctrlr: Controller device tree node
+ * @intspec: Array of u32s from the DT "interrupt" property
+ * @intsize: Number of values inside the intspec array
+ * @out_hwirq: HW IRQ value associated with this interrupt
+ * @out_type: The IRQ SENSE type for this interrupt.
+ *
+ * This Device Tree IRQ specifier translation function translates a specific
+ * "interrupt" defined by two DT cells, where the first cell is the hwirq
+ * number + 1 and the second cell holds the Linux IRQ flags. Since the output
+ * is the hwirq number, this function subtracts 1 from the value listed in DT.
+ *
+ * Return: 0
+ */
+static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
+ struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+
+ /*
+ * The HW value is 1 index less than the DT IRQ values.
+ * i.e. OC4 goes to HW index 3.
+ */
+ *out_hwirq = intspec[0] - 1;
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
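A worked example of the two-cell translation above: a DT specifier of <3 IRQ_TYPE_EDGE_RISING> describes the OC3 pin and yields hwirq 2. The caller below is hypothetical and only illustrates the calling convention:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static void xlate_example(struct irq_domain *d, struct device_node *ctrlr)
{
	u32 spec[2] = { 3, IRQ_TYPE_EDGE_RISING };
	irq_hw_number_t hwirq;
	unsigned int type;

	if (!soctherm_irq_domain_xlate_twocell(d, ctrlr, spec, 2,
					       &hwirq, &type))
		pr_debug("OC spec <3 ...> maps to hwirq %lu\n", hwirq);
}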
+
+static const struct irq_domain_ops soctherm_oc_domain_ops = {
+ .map = soctherm_oc_irq_map,
+ .xlate = soctherm_irq_domain_xlate_twocell,
+};
+
+/**
+ * soctherm_oc_int_init() - Initial enabling of the over-current interrupts
+ * @np: The devicetree node for soctherm
+ * @num_irqs: The number of new interrupt requests
+ *
+ * Sets up the over-current interrupt request chip data and IRQ domain.
+ *
+ * Return: 0 on success, or if over-current interrupts are not enabled;
+ * -ENOMEM if the IRQ domain could not be created.
+ */
+static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
+{
+ if (!num_irqs) {
+ pr_info("%s(): OC interrupts are not enabled\n", __func__);
+ return 0;
+ }
+
+ mutex_init(&soc_irq_cdata.irq_lock);
+ soc_irq_cdata.irq_enable = 0;
+
+ soc_irq_cdata.irq_chip.name = "soc_therm_oc";
+ soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
+ soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
+ soctherm_oc_irq_sync_unlock;
+ soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
+ soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
+ soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
+ soc_irq_cdata.irq_chip.irq_set_wake = NULL;
+
+ soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
+ &soctherm_oc_domain_ops,
+ &soc_irq_cdata);
+
+ if (!soc_irq_cdata.domain) {
+ pr_err("%s: Failed to create IRQ domain\n", __func__);
+ return -ENOMEM;
+ }
+
+ pr_debug("%s(): OC interrupts enabled successfully\n", __func__);
+ return 0;
+}
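Once irq_domain_add_linear() has created the domain, each OC hwirq can be turned into a Linux virq on demand; DT consumers get this via the xlate callback above. A purely illustrative sketch of mapping them explicitly:

#include <linux/irqdomain.h>

static int map_all_oc_irqs(struct irq_domain *domain, int num_irqs)
{
	int hw;

	for (hw = 0; hw < num_irqs; hw++) {
		/* Allocates (or reuses) the virq backing this hwirq. */
		unsigned int virq = irq_create_mapping(domain, hw);

		if (!virq)
			return -ENOSPC;
	}
	return 0;
}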
+
#ifdef CONFIG_DEBUG_FS
static int regs_show(struct seq_file *s, void *data)
{
@@ -929,6 +1574,120 @@ static const struct thermal_cooling_device_ops throt_cooling_ops = {
.set_cur_state = throt_set_cdev_state,
};
+static int soctherm_thermtrips_parse(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
+ const int max_num_prop = ts->soc->num_ttgs * 2;
+ u32 *tlb;
+ int i, j, n, ret;
+
+ if (!tt)
+ return -ENOMEM;
+
+ n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips");
+ if (n <= 0) {
+ dev_info(dev,
+ "missing thermtrips, will use critical trips as shut down temp\n");
+ return n;
+ }
+
+ n = min(max_num_prop, n);
+
+ tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL);
+ if (!tlb)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips",
+ tlb, n);
+ if (ret) {
+ dev_err(dev, "invalid number of elements in thermtrips: %d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ for (j = 0; j < n; j = j + 2) {
+ if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM)
+ continue;
+
+ tt[i].id = tlb[j];
+ tt[i].temp = tlb[j + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+static void soctherm_oc_cfg_parse(struct device *dev,
+ struct device_node *np_oc,
+ struct soctherm_throt_cfg *stc)
+{
+ u32 val;
+
+ if (of_property_read_bool(np_oc, "nvidia,polarity-active-low"))
+ stc->oc_cfg.active_low = 1;
+ else
+ stc->oc_cfg.active_low = 0;
+
+ if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) {
+ stc->oc_cfg.intr_en = 1;
+ stc->oc_cfg.alarm_cnt_thresh = val;
+ }
+
+ if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val))
+ stc->oc_cfg.throt_period = val;
+
+ if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val))
+ stc->oc_cfg.alarm_filter = val;
+
+ /* BRIEF throttling by default, do not support STICKY */
+ stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF;
+}
+
+static int soctherm_throt_cfg_parse(struct device *dev,
+ struct device_node *np,
+ struct soctherm_throt_cfg *stc)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nvidia,priority", &val);
+ if (ret) {
+ dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name);
+ return -EINVAL;
+ }
+ stc->priority = val;
+
+ ret = of_property_read_u32(np, ts->soc->use_ccroc ?
+ "nvidia,cpu-throt-level" :
+ "nvidia,cpu-throt-percent", &val);
+ if (!ret) {
+ if (ts->soc->use_ccroc &&
+ val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
+ stc->cpu_throt_level = val;
+ else if (!ts->soc->use_ccroc && val <= 100)
+ stc->cpu_throt_depth = val;
+ else
+ goto err;
+ } else {
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val);
+ if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
+ stc->gpu_throt_level = val;
+ else
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n",
+ stc->name);
+ return -EINVAL;
+}
+
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
@@ -939,8 +1698,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct device_node *np_stc, *np_stcc;
const char *name;
- u32 val;
- int i, r;
+ int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
ts->throt_cfgs[i].name = throt_names[i];
@@ -958,6 +1716,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
for_each_child_of_node(np_stc, np_stcc) {
struct soctherm_throt_cfg *stc;
struct thermal_cooling_device *tcd;
+ int err;
name = np_stcc->name;
stc = find_throttle_cfg_by_name(ts, name);
@@ -967,51 +1726,34 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
continue;
}
- r = of_property_read_u32(np_stcc, "nvidia,priority", &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing priority\n", name);
- continue;
+ if (stc->init) {
+ dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
+ of_node_put(np_stcc);
+ break;
}
- stc->priority = val;
-
- if (ts->soc->use_ccroc) {
- r = of_property_read_u32(np_stcc,
- "nvidia,cpu-throt-level",
- &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing cpu-throt-level\n",
- name);
- continue;
- }
- stc->cpu_throt_level = val;
+
+ err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
+ if (err)
+ continue;
+
+ if (stc->id >= THROTTLE_OC1) {
+ soctherm_oc_cfg_parse(dev, np_stcc, stc);
+ stc->init = true;
} else {
- r = of_property_read_u32(np_stcc,
- "nvidia,cpu-throt-percent",
- &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing cpu-throt-percent\n",
- name);
- continue;
- }
- stc->cpu_throt_depth = val;
- }
- tcd = thermal_of_cooling_device_register(np_stcc,
+ tcd = thermal_of_cooling_device_register(np_stcc,
(char *)name, ts,
&throt_cooling_ops);
- of_node_put(np_stcc);
- if (IS_ERR_OR_NULL(tcd)) {
- dev_err(dev,
- "throttle-cfg: %s: failed to register cooling device\n",
- name);
- continue;
+ if (IS_ERR_OR_NULL(tcd)) {
+ dev_err(dev,
+ "throttle-cfg: %s: failed to register cooling device\n",
+ name);
+ continue;
+ }
+ stc->cdev = tcd;
+ stc->init = true;
}
- stc->cdev = tcd;
- stc->init = true;
}
of_node_put(np_stc);
@@ -1141,6 +1883,50 @@ static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
}
/**
+ * throttlectl_gpu_level_select() - selects throttling level for GPU
+ * @ts: pointer to a struct tegra_soctherm
+ * @throt: the LIGHT/HEAVY throttle event id
+ *
+ * This function programs soctherm's interface to GK20a NV_THERM to select
+ * one of the pre-configured "Low", "Medium" or "Heavy" throttle levels.
+ */
+static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r, level, throt_vect;
+
+ level = ts->throt_cfgs[throt].gpu_throt_level;
+ throt_vect = THROT_LEVEL_TO_DEPTH(level);
+ r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
+ writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
+}
+
+static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r;
+ struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
+
+ if (oc->mode == OC_THROTTLE_MODE_DISABLED)
+ return -EINVAL;
+
+ r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
+ r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
+ r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
+ r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
+ writel(r, ts->regs + ALARM_CFG(throt));
+ writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
+ writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
+ writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
+ soctherm_oc_intr_enable(ts, throt, oc->intr_en);
+
+ return 0;
+}
+
+/**
* soctherm_throttle_program() - programs pulse skippers' configuration
* @throt: the LIGHT/HEAVY of the throttle event id.
*
@@ -1156,12 +1942,17 @@ static void soctherm_throttle_program(struct tegra_soctherm *ts,
if (!stc.init)
return;
+ if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
+ return;
+
/* Setup PSKIP parameters */
if (ts->soc->use_ccroc)
throttlectl_cpu_level_select(ts, throt);
else
throttlectl_cpu_mn(ts, throt);
+ throttlectl_gpu_level_select(ts, throt);
+
r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
@@ -1215,6 +2006,57 @@ static void tegra_soctherm_throttle(struct device *dev)
writel(v, ts->regs + THERMCTL_STATS_CTL);
}
+static int soctherm_interrupts_init(struct platform_device *pdev,
+ struct tegra_soctherm *tegra)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
+ return ret;
+ }
+
+ tegra->thermal_irq = platform_get_irq(pdev, 0);
+ if (tegra->thermal_irq < 0) {
+ dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
+ return 0;
+ }
+
+ tegra->edp_irq = platform_get_irq(pdev, 1);
+ if (tegra->edp_irq < 0) {
+ dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
+ return 0;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ tegra->thermal_irq,
+ soctherm_thermal_isr,
+ soctherm_thermal_isr_thread,
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ tegra);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ tegra->edp_irq,
+ soctherm_edp_isr,
+ soctherm_edp_isr_thread,
+ IRQF_ONESHOT,
+ "soctherm_edp",
+ tegra);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static void soctherm_init(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
@@ -1292,6 +2134,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (!tegra)
return -ENOMEM;
+ mutex_init(&tegra->thermctl_lock);
dev_set_drvdata(&pdev->dev, tegra);
tegra->soc = soc;
@@ -1370,6 +2213,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (err)
return err;
+ soctherm_thermtrips_parse(pdev);
+
soctherm_init_hw_throt_cdev(pdev);
soctherm_init(pdev);
@@ -1406,6 +2251,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
goto disable_clocks;
}
+ err = soctherm_interrupts_init(pdev, tegra);
+
soctherm_debug_init(pdev);
return 0;
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index e96ca73fd780..70501e73d586 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
@@ -29,6 +30,14 @@
#define THERMCTL_THERMTRIP_CTL 0x80
/* BITs are defined in device file */
+#define THERMCTL_INTR_ENABLE 0x88
+#define THERMCTL_INTR_DISABLE 0x8c
+#define TH_INTR_UP_DN_EN 0x3
+#define THERM_IRQ_MEM_MASK (TH_INTR_UP_DN_EN << 24)
+#define THERM_IRQ_GPU_MASK (TH_INTR_UP_DN_EN << 16)
+#define THERM_IRQ_CPU_MASK (TH_INTR_UP_DN_EN << 8)
+#define THERM_IRQ_TSENSE_MASK (TH_INTR_UP_DN_EN << 0)
+
#define SENSOR_PDIV 0x1c0
#define SENSOR_PDIV_CPU_MASK (0xf << 12)
#define SENSOR_PDIV_GPU_MASK (0xf << 8)
@@ -70,6 +79,7 @@ struct tegra_tsensor_group {
u32 thermtrip_enable_mask;
u32 thermtrip_any_en_mask;
u32 thermtrip_threshold_mask;
+ u32 thermctl_isr_mask;
u16 thermctl_lvl0_offset;
u32 thermctl_lvl0_up_thresh_mask;
u32 thermctl_lvl0_dn_thresh_mask;
@@ -92,6 +102,11 @@ struct tegra_tsensor {
const struct tegra_tsensor_group *group;
};
+struct tsensor_group_thermtrips {
+ u8 id;
+ u32 temp;
+};
+
struct tegra_soctherm_fuse {
u32 fuse_base_cp_mask, fuse_base_cp_shift;
u32 fuse_base_ft_mask, fuse_base_ft_shift;
@@ -113,6 +128,7 @@ struct tegra_soctherm_soc {
const int thresh_grain;
const unsigned int bptt;
const bool use_ccroc;
+ struct tsensor_group_thermtrips *thermtrips;
};
int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index 36768630f78c..20ad27f4d1a1 100644
--- a/drivers/thermal/tegra/tegra124-soctherm.c
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -55,6 +56,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -73,6 +75,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -89,6 +92,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -107,6 +111,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
index 97fa30501eb1..b76308fdad9e 100644
--- a/drivers/thermal/tegra/tegra132-soctherm.c
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -55,6 +56,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -73,6 +75,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -89,6 +92,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -107,6 +111,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
index ad53169a8e95..d31b50050faa 100644
--- a/drivers/thermal/tegra/tegra210-soctherm.c
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -56,6 +57,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -74,6 +76,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -90,6 +93,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -108,6 +112,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -203,6 +208,13 @@ static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
.fuse_spare_realignment = 0,
};
+struct tsensor_group_thermtrips tegra210_tsensor_thermtrips[] = {
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+};
+
const struct tegra_soctherm_soc tegra210_soctherm = {
.tsensors = tegra210_tsensors,
.num_tsensors = ARRAY_SIZE(tegra210_tsensors),
@@ -212,4 +224,5 @@ const struct tegra_soctherm_soc tegra210_soctherm = {
.thresh_grain = TEGRA210_THRESH_GRAIN,
.bptt = TEGRA210_BPTT,
.use_ccroc = false,
+ .thermtrips = tegra210_tsensor_thermtrips,
};
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index e22fc60ad36d..deb244f12de4 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -29,6 +29,9 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
int temp, temp_hi, temp_lo, adc_hi, adc_lo;
int i;
+ if (!gti->lookup_table)
+ return val;
+
for (i = 0; i < gti->nlookup_table; i++) {
if (val >= gti->lookup_table[2 * i + 1])
break;
@@ -81,9 +84,9 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
sizeof(u32));
- if (ntable < 0) {
- dev_err(dev, "Lookup table is not provided\n");
- return ntable;
+ if (ntable <= 0) {
+ dev_notice(dev, "no lookup table, assuming ADC channel returns millicelsius\n");
+ return 0;
}
if (ntable % 2) {
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6590bb5cb688..46cfb7de4eb2 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -266,7 +266,7 @@ static int __init thermal_register_governors(void)
return thermal_gov_power_allocator_register();
}
-static void thermal_unregister_governors(void)
+static void __init thermal_unregister_governors(void)
{
thermal_gov_step_wise_unregister();
thermal_gov_fair_share_unregister();
@@ -941,7 +941,7 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
*/
static struct thermal_cooling_device *
__thermal_cooling_device_register(struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
@@ -1015,7 +1015,7 @@ __thermal_cooling_device_register(struct device_node *np,
* ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
+thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return __thermal_cooling_device_register(NULL, type, devdata, ops);
@@ -1039,13 +1039,62 @@ EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
*/
struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return __thermal_cooling_device_register(np, type, devdata, ops);
}
EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
+static void thermal_cooling_device_release(struct device *dev, void *res)
+{
+ thermal_cooling_device_unregister(
+ *(struct thermal_cooling_device **)res);
+}
+
+/**
+ * devm_thermal_of_cooling_device_register() - register an OF thermal cooling
+ * device
+ * @dev: a valid struct device pointer of a sensor device.
+ * @np: a pointer to a device tree node.
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This function will register a cooling device with device tree node reference.
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+devm_thermal_of_cooling_device_register(struct device *dev,
+ struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ struct thermal_cooling_device **ptr, *tcd;
+
+ ptr = devres_alloc(thermal_cooling_device_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ tcd = __thermal_cooling_device_register(np, type, devdata, ops);
+ if (IS_ERR(tcd)) {
+ devres_free(ptr);
+ return tcd;
+ }
+
+ *ptr = tcd;
+ devres_add(dev, ptr);
+
+ return tcd;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);
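A usage sketch for the new devm helper, from a hypothetical driver probe with made-up names (my_cooling_ops is assumed to exist): the cooling device is unregistered automatically when the probing device is unbound, so no remove-path cleanup is needed.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

static int my_cooling_probe(struct platform_device *pdev)
{
	struct thermal_cooling_device *cdev;

	/* Lifetime of the cooling device is tied to pdev via devres. */
	cdev = devm_thermal_of_cooling_device_register(&pdev->dev,
						       pdev->dev.of_node,
						       "my-cooling", NULL,
						       &my_cooling_ops);
	return PTR_ERR_OR_ZERO(cdev);
}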
+
static void __unbind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev)
{
@@ -1494,6 +1543,7 @@ static int thermal_pm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
struct thermal_zone_device *tz;
+ enum thermal_device_mode tz_mode;
switch (mode) {
case PM_HIBERNATION_PREPARE:
@@ -1506,6 +1556,13 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
+ tz_mode = THERMAL_DEVICE_ENABLED;
+ if (tz->ops->get_mode)
+ tz->ops->get_mode(tz, &tz_mode);
+
+ if (tz_mode == THERMAL_DEVICE_DISABLED)
+ continue;
+
thermal_zone_device_init(tz);
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
@@ -1563,19 +1620,4 @@ error:
mutex_destroy(&poweroff_lock);
return result;
}
-
-static void __exit thermal_exit(void)
-{
- unregister_pm_notifier(&thermal_pm_nb);
- of_thermal_destroy_zones();
- genetlink_exit();
- class_unregister(&thermal_class);
- thermal_unregister_governors();
- ida_destroy(&thermal_tz_ida);
- ida_destroy(&thermal_cdev_ida);
- mutex_destroy(&thermal_list_lock);
- mutex_destroy(&thermal_governor_lock);
-}
-
fs_initcall(thermal_init);
-module_exit(thermal_exit);
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
new file mode 100644
index 000000000000..de3cceea23bc
--- /dev/null
+++ b/drivers/thermal/thermal_mmio.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+struct thermal_mmio {
+ void __iomem *mmio_base;
+ u32 (*read_mmio)(void __iomem *mmio_base);
+ u32 mask;
+ int factor;
+};
+
+static u32 thermal_mmio_readb(void __iomem *mmio_base)
+{
+ return readb(mmio_base);
+}
+
+static int thermal_mmio_get_temperature(void *private, int *temp)
+{
+ int t;
+ struct thermal_mmio *sensor =
+ (struct thermal_mmio *)private;
+
+ t = sensor->read_mmio(sensor->mmio_base) & sensor->mask;
+ t *= sensor->factor;
+
+ *temp = t;
+
+ return 0;
+}
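The conversion above is a mask followed by a scale: with the al_thermal_init() settings below (mask 0xff, factor 1000), a raw register byte of 0x2a reads back as 42000 millicelsius. A standalone restatement of the arithmetic:

#include <linux/types.h>

/* Same arithmetic as thermal_mmio_get_temperature(), outside the driver. */
static int mmio_raw_to_millicelsius(u32 raw, u32 mask, int factor)
{
	return (raw & mask) * factor;
}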
+
+static struct thermal_zone_of_device_ops thermal_mmio_ops = {
+ .get_temp = thermal_mmio_get_temperature,
+};
+
+static int thermal_mmio_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ struct thermal_mmio *sensor;
+ int (*sensor_init_func)(struct platform_device *pdev,
+ struct thermal_mmio *sensor);
+ struct thermal_zone_device *thermal_zone;
+ int ret;
+ int temperature;
+
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ dev_err(&pdev->dev,
+ "failed to get platform memory resource\n");
+ return -ENODEV;
+ }
+
+ sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
+ if (IS_ERR(sensor->mmio_base)) {
+ dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
+ PTR_ERR(sensor->mmio_base));
+ return PTR_ERR(sensor->mmio_base);
+ }
+
+ sensor_init_func = device_get_match_data(&pdev->dev);
+ if (sensor_init_func) {
+ ret = sensor_init_func(pdev, sensor);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize sensor (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ thermal_zone = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ 0,
+ sensor,
+ &thermal_mmio_ops);
+ if (IS_ERR(thermal_zone)) {
+ dev_err(&pdev->dev,
+ "failed to register sensor (%ld)\n",
+ PTR_ERR(thermal_zone));
+ return PTR_ERR(thermal_zone);
+ }
+
+ thermal_mmio_get_temperature(sensor, &temperature);
+ dev_info(&pdev->dev,
+ "thermal mmio sensor %s registered, current temperature: %d\n",
+ pdev->name, temperature);
+
+ return 0;
+}
+
+static int al_thermal_init(struct platform_device *pdev,
+ struct thermal_mmio *sensor)
+{
+ sensor->read_mmio = thermal_mmio_readb;
+ sensor->mask = 0xff;
+ sensor->factor = 1000;
+
+ return 0;
+}
+
+static const struct of_device_id thermal_mmio_id_table[] = {
+ { .compatible = "amazon,al-thermal", .data = al_thermal_init},
+ {}
+};
+MODULE_DEVICE_TABLE(of, thermal_mmio_id_table);
+
+static struct platform_driver thermal_mmio_driver = {
+ .probe = thermal_mmio_probe,
+ .driver = {
+ .name = "thermal-mmio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(thermal_mmio_id_table),
+ },
+};
+
+module_platform_driver(thermal_mmio_driver);
+
+MODULE_AUTHOR("Talel Shenhar <talel@amazon.com>");
+MODULE_DESCRIPTION("Thermal MMIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index 75155bde2b88..31f53fa77e4a 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
static int __init hvc_sbi_console_init(void)
{
hvc_instantiate(0, 0, &hvc_sbi_ops);
- add_preferred_console("hvc", 0, NULL);
return 0;
}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 59e82e6d776d..573b2055173c 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -527,8 +527,12 @@ void __handle_sysrq(int key, bool check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
+ int orig_suppress_printk;
int i;
+ orig_suppress_printk = suppress_printk;
+ suppress_printk = 0;
+
rcu_sysrq_start();
rcu_read_lock();
/*
@@ -574,6 +578,8 @@ void __handle_sysrq(int key, bool check_mask)
}
rcu_read_unlock();
rcu_sysrq_end();
+
+ suppress_printk = orig_suppress_printk;
}
void handle_sysrq(int key)
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index ca8a94f15ac0..38183ac438c6 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -40,8 +40,6 @@ struct da8xx_ohci_hcd {
struct phy *usb11_phy;
struct regulator *vbus_reg;
struct notifier_block nb;
- unsigned int reg_enabled;
- struct gpio_desc *vbus_gpio;
struct gpio_desc *oc_gpio;
};
@@ -92,29 +90,21 @@ static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
struct device *dev = hcd->self.controller;
int ret;
- if (da8xx_ohci->vbus_gpio) {
- gpiod_set_value_cansleep(da8xx_ohci->vbus_gpio, on);
- return 0;
- }
-
if (!da8xx_ohci->vbus_reg)
return 0;
- if (on && !da8xx_ohci->reg_enabled) {
+ if (on) {
ret = regulator_enable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 1;
-
- } else if (!on && da8xx_ohci->reg_enabled) {
+ } else {
ret = regulator_disable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to disable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 0;
}
return 0;
@@ -124,9 +114,6 @@ static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return gpiod_get_value_cansleep(da8xx_ohci->vbus_gpio);
-
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
@@ -159,9 +146,6 @@ static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return 1;
-
if (da8xx_ohci->vbus_reg)
return 1;
@@ -206,12 +190,18 @@ static int ohci_da8xx_regulator_event(struct notifier_block *nb,
return 0;
}
-static irqreturn_t ohci_da8xx_oc_handler(int irq, void *data)
+static irqreturn_t ohci_da8xx_oc_thread(int irq, void *data)
{
struct da8xx_ohci_hcd *da8xx_ohci = data;
+ struct device *dev = da8xx_ohci->hcd->self.controller;
+ int ret;
- if (gpiod_get_value(da8xx_ohci->oc_gpio))
- gpiod_set_value(da8xx_ohci->vbus_gpio, 0);
+ if (gpiod_get_value_cansleep(da8xx_ohci->oc_gpio) &&
+ da8xx_ohci->vbus_reg) {
+ ret = regulator_disable(da8xx_ohci->vbus_reg);
+ if (ret)
+ dev_err(dev, "Failed to disable regulator: %d\n", ret);
+ }
return IRQ_HANDLED;
}
@@ -424,11 +414,6 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
}
}
- da8xx_ohci->vbus_gpio = devm_gpiod_get_optional(dev, "vbus",
- GPIOD_OUT_HIGH);
- if (IS_ERR(da8xx_ohci->vbus_gpio))
- goto err;
-
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
if (IS_ERR(da8xx_ohci->oc_gpio))
goto err;
@@ -438,8 +423,9 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
if (oc_irq < 0)
goto err;
- error = devm_request_irq(dev, oc_irq, ohci_da8xx_oc_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ error = devm_request_threaded_irq(dev, oc_irq, NULL,
+ ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"OHCI over-current indicator", da8xx_ohci);
if (error)
goto err;
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index be04c117fe80..c97f270338bf 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -142,7 +142,6 @@ config USB_FTDI_ELAN
config USB_APPLEDISPLAY
tristate "Apple Cinema Display support"
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to control the backlight of Apple Cinema
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b96fedc77ee5..3cc1a05fde1c 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -88,7 +88,7 @@ static void mdev_release_parent(struct kref *kref)
put_device(dev);
}
-static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
+static struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
{
if (parent)
kref_get(&parent->ref);
@@ -96,7 +96,7 @@ static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
return parent;
}
-static inline void mdev_put_parent(struct mdev_parent *parent)
+static void mdev_put_parent(struct mdev_parent *parent)
{
if (parent)
kref_put(&parent->ref, mdev_release_parent);
@@ -141,7 +141,7 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
*/
ret = parent->ops->remove(mdev);
if (ret && !force_remove)
- return -EBUSY;
+ return ret;
sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups);
return 0;
@@ -149,10 +149,10 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
static int mdev_device_remove_cb(struct device *dev, void *data)
{
- if (!dev_is_mdev(dev))
- return 0;
+ if (dev_is_mdev(dev))
+ mdev_device_remove(dev, true);
- return mdev_device_remove(dev, data ? *(bool *)data : true);
+ return 0;
}
/*
@@ -181,6 +181,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
/* Check for duplicate */
parent = __find_parent_device(dev);
if (parent) {
+ parent = NULL;
ret = -EEXIST;
goto add_dev_err;
}
@@ -239,7 +240,6 @@ EXPORT_SYMBOL(mdev_register_device);
void mdev_unregister_device(struct device *dev)
{
struct mdev_parent *parent;
- bool force_remove = true;
mutex_lock(&parent_list_lock);
parent = __find_parent_device(dev);
@@ -253,8 +253,7 @@ void mdev_unregister_device(struct device *dev)
list_del(&parent->next);
class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
- device_for_each_child(dev, (void *)&force_remove,
- mdev_device_remove_cb);
+ device_for_each_child(dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
@@ -310,7 +309,6 @@ int mdev_device_create(struct kobject *kobj,
mutex_unlock(&mdev_list_lock);
mdev->parent = parent;
- kref_init(&mdev->ref);
mdev->dev.parent = dev;
mdev->dev.bus = &mdev_bus_type;
@@ -390,6 +388,24 @@ int mdev_device_remove(struct device *dev, bool force_remove)
return 0;
}
+int mdev_set_iommu_device(struct device *dev, struct device *iommu_device)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ mdev->iommu_device = iommu_device;
+
+ return 0;
+}
+EXPORT_SYMBOL(mdev_set_iommu_device);
+
+struct device *mdev_get_iommu_device(struct device *dev)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ return mdev->iommu_device;
+}
+EXPORT_SYMBOL(mdev_get_iommu_device);
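A usage sketch for the new mdev_set_iommu_device() export: a vendor driver could record, in its create callback, which physical device backs the mdev for IOMMU purposes. The callback name and the choice of the parent device are assumptions, not part of this patch.

#include <linux/mdev.h>

static int my_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* Associate the mdev with the physical device that backs it. */
	return mdev_set_iommu_device(mdev_dev(mdev), mdev_parent_dev(mdev));
}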
+
static int __init mdev_init(void)
{
return mdev_bus_register();
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 379758c52b1b..36cbbdb754de 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -30,9 +30,9 @@ struct mdev_device {
struct mdev_parent *parent;
guid_t uuid;
void *driver_data;
- struct kref ref;
struct list_head next;
struct kobject *type_kobj;
+ struct device *iommu_device;
bool active;
};
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 5193a0e0ce5a..cbf94b8165ea 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -280,7 +280,7 @@ type_link_failed:
void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type)
{
+ sysfs_remove_files(&dev->kobj, mdev_device_attrs);
sysfs_remove_link(&dev->kobj, "mdev_type");
sysfs_remove_link(type->devices_kobj, dev_name(dev));
- sysfs_remove_files(&dev->kobj, mdev_device_attrs);
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 3fa20e95a6bb..cab71da46f4a 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/eventfd.h>
@@ -287,12 +288,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
if (!vdev->pci_saved_state)
- pr_debug("%s: Couldn't store %s saved state\n",
- __func__, dev_name(&pdev->dev));
+ pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
if (likely(!nointxmask)) {
if (vfio_pci_nointx(pdev)) {
- dev_info(&pdev->dev, "Masking broken INTx support\n");
+ pci_info(pdev, "Masking broken INTx support\n");
vdev->nointx = true;
pci_intx(pdev, 0);
} else
@@ -336,8 +336,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
if (ret) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup Intel IGD regions\n");
+ pci_warn(pdev, "Failed to setup Intel IGD regions\n");
goto disable_exit;
}
}
@@ -346,8 +345,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
if (ret && ret != -ENODEV) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup NVIDIA NV2 RAM region\n");
+ pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
goto disable_exit;
}
}
@@ -356,8 +354,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
ret = vfio_pci_ibm_npu2_init(vdev);
if (ret && ret != -ENODEV) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup NVIDIA NV2 ATSD region\n");
+ pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
goto disable_exit;
}
}
@@ -429,8 +426,7 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
* is just busy work.
*/
if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
- pr_info("%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&pdev->dev));
+ pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
if (!vdev->reset_works)
goto out;
@@ -1255,17 +1251,18 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
static void vfio_pci_request(void *device_data, unsigned int count)
{
struct vfio_pci_device *vdev = device_data;
+ struct pci_dev *pdev = vdev->pdev;
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
if (!(count % 10))
- dev_notice_ratelimited(&vdev->pdev->dev,
+ pci_notice_ratelimited(pdev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(vdev->req_trigger, 1);
} else if (count == 0) {
- dev_warn(&vdev->pdev->dev,
+ pci_warn(pdev,
"No device request channel registered, blocked until released by user\n");
}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e82b51114687..52963a904790 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -412,8 +412,7 @@ static void vfio_bar_restore(struct vfio_pci_device *vdev)
if (pdev->is_virtfn)
return;
- pr_info("%s: %s reset recovery - restoring bars\n",
- __func__, dev_name(&pdev->dev));
+ pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);
for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
pci_user_write_config_dword(pdev, i, *rbar);
@@ -1298,8 +1297,8 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
else
return PCI_SATA_SIZEOF_SHORT;
default:
- pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n",
- dev_name(&pdev->dev), __func__, cap, pos);
+ pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
+ __func__, cap, pos);
}
return 0;
@@ -1372,8 +1371,8 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
}
return PCI_TPH_BASE_SIZEOF;
default:
- pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n",
- dev_name(&pdev->dev), __func__, ecap, epos);
+ pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
+ __func__, ecap, epos);
}
return 0;
@@ -1474,8 +1473,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
}
if (!len) {
- pr_info("%s: %s hiding cap 0x%x\n",
- __func__, dev_name(&pdev->dev), cap);
+ pci_info(pdev, "%s: hiding cap %#x@%#x\n", __func__,
+ cap, pos);
*prev = next;
pos = next;
continue;
@@ -1486,9 +1485,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
continue;
- pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
- __func__, dev_name(&pdev->dev),
- pos + i, map[pos + i], cap);
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n",
+ __func__, pos + i, map[pos + i], cap);
}
BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
@@ -1549,8 +1547,8 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
}
if (!len) {
- pr_info("%s: %s hiding ecap 0x%x@0x%x\n",
- __func__, dev_name(&pdev->dev), ecap, epos);
+ pci_info(pdev, "%s: hiding ecap %#x@%#x\n",
+ __func__, ecap, epos);
/* If not the first in the chain, we can skip over it */
if (prev) {
@@ -1572,9 +1570,8 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
continue;
- pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
- __func__, dev_name(&pdev->dev),
- epos + i, map[epos + i], ecap);
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
+ __func__, epos + i, map[epos + i], ecap);
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 32f695ffe128..50fe3c4f7feb 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -472,6 +472,8 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
return 0;
free_exit:
+ if (data->base)
+ memunmap(data->base);
kfree(data);
return ret;
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index 3ddb2704221d..fe95964bc3be 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -89,7 +89,8 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
} while ((pcs_value & MDIO_CTRL1_RESET) && --count);
if (pcs_value & MDIO_CTRL1_RESET)
- pr_warn("%s XGBE PHY reset timeout\n", __func__);
+ dev_warn(vdev->device, "%s: XGBE PHY reset timeout\n",
+ __func__);
/* disable auto-negotiation */
value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1);
@@ -114,7 +115,7 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
usleep_range(500, 600);
if (!count)
- pr_warn("%s MAC SW reset failed\n", __func__);
+ dev_warn(vdev->device, "%s: MAC SW reset failed\n", __func__);
return 0;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index c0cd824be2b7..2a45b36bcf58 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -12,6 +12,8 @@
* GNU General Public License for more details.
*/
+#define dev_fmt(fmt) "VFIO: " fmt
+
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
@@ -63,7 +65,7 @@ static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
adev = ACPI_COMPANION(dev);
if (!adev) {
- pr_err("VFIO: ACPI companion device not found for %s\n",
+ dev_err(dev, "ACPI companion device not found for %s\n",
vdev->name);
return -ENODEV;
}
@@ -638,7 +640,7 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
ret = device_property_read_string(dev, "compatible",
&vdev->compat);
if (ret)
- pr_err("VFIO: Cannot retrieve compat for %s\n", vdev->name);
+ dev_err(dev, "Cannot retrieve compat for %s\n", vdev->name);
return ret;
}
@@ -680,14 +682,14 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
ret = vfio_platform_get_reset(vdev);
if (ret && vdev->reset_required) {
- pr_err("VFIO: No reset function found for device %s\n",
- vdev->name);
+ dev_err(dev, "No reset function found for device %s\n",
+ vdev->name);
return ret;
}
group = vfio_iommu_group_get(dev);
if (!group) {
- pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
+ dev_err(dev, "No IOMMU group for device %s\n", vdev->name);
ret = -EINVAL;
goto put_reset;
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index a3030cdf3c18..82fcf07fa9ea 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -34,6 +34,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
+#include <linux/sched/signal.h>
#define DRIVER_VERSION "0.3"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -704,8 +705,8 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
return 0;
/* TODO Prevent device auto probing */
- WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
- iommu_group_id(group->iommu_group));
+ dev_WARN(dev, "Device added to live group %d!\n",
+ iommu_group_id(group->iommu_group));
return 0;
}
@@ -748,25 +749,22 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
*/
break;
case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
- pr_debug("%s: Device %s, group %d binding to driver\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group));
+ dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
+ iommu_group_id(group->iommu_group));
break;
case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
- pr_debug("%s: Device %s, group %d bound to driver %s\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group), dev->driver->name);
+ dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
+ iommu_group_id(group->iommu_group), dev->driver->name);
BUG_ON(vfio_group_nb_verify(group, dev));
break;
case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
- pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group), dev->driver->name);
+ dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
+ __func__, iommu_group_id(group->iommu_group),
+ dev->driver->name);
break;
case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
- pr_debug("%s: Device %s, group %d unbound from driver\n",
- __func__, dev_name(dev),
- iommu_group_id(group->iommu_group));
+ dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
+ iommu_group_id(group->iommu_group));
/*
* XXX An unbound device in a live group is ok, but we'd
* really like to avoid the above BUG_ON by preventing other
@@ -830,8 +828,8 @@ int vfio_add_group_dev(struct device *dev,
device = vfio_group_get_device(group, dev);
if (device) {
- WARN(1, "Device %s already exists on group %d\n",
- dev_name(dev), iommu_group_id(iommu_group));
+ dev_WARN(dev, "Device already exists on group %d\n",
+ iommu_group_id(iommu_group));
vfio_device_put(device);
vfio_group_put(group);
return -EBUSY;
@@ -904,30 +902,17 @@ void *vfio_device_data(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_device_data);
-/* Given a referenced group, check if it contains the device */
-static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- device = vfio_group_get_device(group, dev);
- if (!device)
- return false;
-
- vfio_device_put(device);
- return true;
-}
-
/*
* Decrement the device reference count and wait for the device to be
* removed. Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct vfio_device *device = dev_get_drvdata(dev);
struct vfio_group *group = device->group;
void *device_data = device->device_data;
struct vfio_unbound_dev *unbound;
unsigned int i = 0;
- long ret;
bool interrupted = false;
/*
@@ -964,6 +949,8 @@ void *vfio_del_group_dev(struct device *dev)
* interval with counter to allow the driver to take escalating
* measures to release the device if it has the ability to do so.
*/
+ add_wait_queue(&vfio.release_q, &wait);
+
do {
device = vfio_group_get_device(group, dev);
if (!device)
@@ -975,12 +962,10 @@ void *vfio_del_group_dev(struct device *dev)
vfio_device_put(device);
if (interrupted) {
- ret = wait_event_timeout(vfio.release_q,
- !vfio_dev_present(group, dev), HZ * 10);
+ wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
} else {
- ret = wait_event_interruptible_timeout(vfio.release_q,
- !vfio_dev_present(group, dev), HZ * 10);
- if (ret == -ERESTARTSYS) {
+ wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
+ if (signal_pending(current)) {
interrupted = true;
dev_warn(dev,
"Device is currently in use, task"
@@ -989,8 +974,10 @@ void *vfio_del_group_dev(struct device *dev)
current->comm, task_pid_nr(current));
}
}
- } while (ret <= 0);
+ } while (1);
+
+ remove_wait_queue(&vfio.release_q, &wait);
/*
* In order to support multiple devices per group, devices can be
* plucked from the group while other devices in the group are still
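
The vfio_del_group_dev() rework above replaces the wait_event_*timeout() helpers (and the vfio_dev_present() predicate they needed) with an explicitly managed wait-queue entry and wait_woken(). A standalone sketch of that pattern, with placeholder names (release_q, still_busy):

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/sched/signal.h>

static void example_wait_for_release(wait_queue_head_t *release_q,
				     bool (*still_busy)(void))
{
	/* The entry is queued once, outside the loop, which closes the
	 * race between re-checking the condition and going to sleep.
	 */
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(release_q, &wait);
	while (still_busy()) {
		wait_woken(&wait, TASK_INTERRUPTIBLE, 10 * HZ);
		if (signal_pending(current))
			break;		/* caller decides how to retry */
	}
	remove_wait_queue(release_q, &wait);
}
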
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 6b64e45a5269..40ddc0c5f677 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -532,7 +532,8 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
enum dma_data_direction direction = iommu_tce_direction(tce);
if (get_user_pages_fast(tce & PAGE_MASK, 1,
- direction != DMA_TO_DEVICE, &page) != 1)
+ direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
+ &page) != 1)
return -EFAULT;
*hpa = __pa((unsigned long) page_address(page));
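
This hunk, like the vhost change further down, reflects the updated get_user_pages_fast() interface: the third argument is now a FOLL_* flags mask rather than a bare "write" integer. A sketch of the new calling convention; the helper name and direction handling are illustrative only.

#include <linux/mm.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>

static int example_pin_one_page(unsigned long uaddr,
				enum dma_data_direction dir,
				struct page **page)
{
	unsigned int gup_flags = (dir != DMA_TO_DEVICE) ? FOLL_WRITE : 0;

	/* Returns the number of pages pinned; exactly one is requested. */
	if (get_user_pages_fast(uaddr & PAGE_MASK, 1, gup_flags, page) != 1)
		return -EFAULT;

	return 0;
}
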
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index d0f731c9920a..3ddc375e7063 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -97,6 +97,7 @@ struct vfio_dma {
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
+ bool mdev_group; /* An mdev group */
};
/*
@@ -357,7 +358,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
down_read(&mm->mmap_sem);
if (mm == current->mm) {
- ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
+ ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
+ vmas);
} else {
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
vmas, NULL);
@@ -564,7 +566,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
mutex_lock(&iommu->lock);
/* Fail if notifier list is empty */
- if ((!iommu->external_domain) || (!iommu->notifier.head)) {
+ if (!iommu->notifier.head) {
ret = -EINVAL;
goto pin_done;
}
@@ -646,11 +648,6 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
mutex_lock(&iommu->lock);
- if (!iommu->external_domain) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
-
do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
for (i = 0; i < npage; i++) {
struct vfio_dma *dma;
@@ -1311,13 +1308,109 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
return ret;
}
+static struct device *vfio_mdev_get_iommu_device(struct device *dev)
+{
+ struct device *(*fn)(struct device *dev);
+ struct device *iommu_device;
+
+ fn = symbol_get(mdev_get_iommu_device);
+ if (fn) {
+ iommu_device = fn(dev);
+ symbol_put(mdev_get_iommu_device);
+
+ return iommu_device;
+ }
+
+ return NULL;
+}
+
+static int vfio_mdev_attach_domain(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_mdev_get_iommu_device(dev);
+ if (iommu_device) {
+ if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
+ return iommu_aux_attach_device(domain, iommu_device);
+ else
+ return iommu_attach_device(domain, iommu_device);
+ }
+
+ return -EINVAL;
+}
+
+static int vfio_mdev_detach_domain(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_mdev_get_iommu_device(dev);
+ if (iommu_device) {
+ if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
+ iommu_aux_detach_device(domain, iommu_device);
+ else
+ iommu_detach_device(domain, iommu_device);
+ }
+
+ return 0;
+}
+
+static int vfio_iommu_attach_group(struct vfio_domain *domain,
+ struct vfio_group *group)
+{
+ if (group->mdev_group)
+ return iommu_group_for_each_dev(group->iommu_group,
+ domain->domain,
+ vfio_mdev_attach_domain);
+ else
+ return iommu_attach_group(domain->domain, group->iommu_group);
+}
+
+static void vfio_iommu_detach_group(struct vfio_domain *domain,
+ struct vfio_group *group)
+{
+ if (group->mdev_group)
+ iommu_group_for_each_dev(group->iommu_group, domain->domain,
+ vfio_mdev_detach_domain);
+ else
+ iommu_detach_group(domain->domain, group->iommu_group);
+}
+
+static bool vfio_bus_is_mdev(struct bus_type *bus)
+{
+ struct bus_type *mdev_bus;
+ bool ret = false;
+
+ mdev_bus = symbol_get(mdev_bus_type);
+ if (mdev_bus) {
+ ret = (bus == mdev_bus);
+ symbol_put(mdev_bus_type);
+ }
+
+ return ret;
+}
+
+static int vfio_mdev_iommu_device(struct device *dev, void *data)
+{
+ struct device **old = data, *new;
+
+ new = vfio_mdev_get_iommu_device(dev);
+ if (!new || (*old && *old != new))
+ return -EINVAL;
+
+ *old = new;
+
+ return 0;
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL, *mdev_bus;
+ struct bus_type *bus = NULL;
int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base;
@@ -1352,23 +1445,30 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_free;
- mdev_bus = symbol_get(mdev_bus_type);
+ if (vfio_bus_is_mdev(bus)) {
+ struct device *iommu_device = NULL;
- if (mdev_bus) {
- if ((bus == mdev_bus) && !iommu_present(bus)) {
- symbol_put(mdev_bus_type);
+ group->mdev_group = true;
+
+ /* Determine the isolation type */
+ ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
+ vfio_mdev_iommu_device);
+ if (ret || !iommu_device) {
if (!iommu->external_domain) {
INIT_LIST_HEAD(&domain->group_list);
iommu->external_domain = domain;
- } else
+ } else {
kfree(domain);
+ }
list_add(&group->next,
&iommu->external_domain->group_list);
mutex_unlock(&iommu->lock);
+
return 0;
}
- symbol_put(mdev_bus_type);
+
+ bus = iommu_device->bus;
}
domain->domain = iommu_domain_alloc(bus);
@@ -1386,7 +1486,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_domain;
}
- ret = iommu_attach_group(domain->domain, iommu_group);
+ ret = vfio_iommu_attach_group(domain, group);
if (ret)
goto out_domain;
@@ -1418,8 +1518,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_for_each_entry(d, &iommu->domain_list, next) {
if (d->domain->ops == domain->domain->ops &&
d->prot == domain->prot) {
- iommu_detach_group(domain->domain, iommu_group);
- if (!iommu_attach_group(d->domain, iommu_group)) {
+ vfio_iommu_detach_group(domain, group);
+ if (!vfio_iommu_attach_group(d, group)) {
list_add(&group->next, &d->group_list);
iommu_domain_free(domain->domain);
kfree(domain);
@@ -1427,7 +1527,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
return 0;
}
- ret = iommu_attach_group(domain->domain, iommu_group);
+ ret = vfio_iommu_attach_group(domain, group);
if (ret)
goto out_domain;
}
@@ -1453,7 +1553,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
return 0;
out_detach:
- iommu_detach_group(domain->domain, iommu_group);
+ vfio_iommu_detach_group(domain, group);
out_domain:
iommu_domain_free(domain->domain);
out_free:
@@ -1544,7 +1644,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (!group)
continue;
- iommu_detach_group(domain->domain, iommu_group);
+ vfio_iommu_detach_group(domain, group);
list_del(&group->next);
kfree(group);
/*
@@ -1610,7 +1710,7 @@ static void vfio_release_domain(struct vfio_domain *domain, bool external)
list_for_each_entry_safe(group, group_tmp,
&domain->group_list, next) {
if (!external)
- iommu_detach_group(domain->domain, group->iommu_group);
+ vfio_iommu_detach_group(domain, group);
list_del(&group->next);
kfree(group);
}
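
The new mdev helpers in the type1 hunks above resolve the optional mdev module's exports at runtime instead of linking against them, so the IOMMU backend keeps building and working when mdev is not present. A sketch of that symbol_get()/symbol_put() probing, mirroring vfio_mdev_get_iommu_device() above; the extern declaration stands in for what <linux/mdev.h> normally provides.

#include <linux/module.h>
#include <linux/device.h>

/* Exported by the (optional) mdev module; declared here only for the sketch. */
extern struct device *mdev_get_iommu_device(struct device *dev);

static struct device *example_mdev_iommu_device(struct device *dev)
{
	struct device *(*fn)(struct device *dev);
	struct device *iommu_device = NULL;

	fn = symbol_get(mdev_get_iommu_device);	/* NULL if mdev is absent */
	if (fn) {
		iommu_device = fn(dev);
		/* Drop the module reference taken by symbol_get(). */
		symbol_put(mdev_get_iommu_device);
	}

	return iommu_device;
}
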
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 618fb6461017..c090d177bd75 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1443,7 +1443,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
- smp_mb__after_atomic();
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 351af88231ad..1e3ed41ae1f3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1704,7 +1704,7 @@ static int set_bit_to_user(int nr, void __user *addr)
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
- r = get_user_pages_fast(log, 1, 1, &page);
+ r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 71ee978c848f..3ed1d9084f94 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -2,13 +2,7 @@
# Backlight & LCD drivers configuration
#
-menuconfig BACKLIGHT_LCD_SUPPORT
- bool "Backlight & LCD device support"
- help
- Enable this to be able to choose the drivers for controlling the
- backlight and the LCD panel on some platforms, for example on PDAs.
-
-if BACKLIGHT_LCD_SUPPORT
+menu "Backlight & LCD device support"
#
# LCD
@@ -199,7 +193,6 @@ config BACKLIGHT_IPAQ_MICRO
config BACKLIGHT_LM3533
tristate "Backlight Driver for LM3533"
- depends on BACKLIGHT_CLASS_DEVICE
depends on MFD_LM3533
help
Say Y to enable the backlight driver for National Semiconductor / TI
@@ -323,7 +316,7 @@ config BACKLIGHT_ADP5520
config BACKLIGHT_ADP8860
tristate "Backlight Driver for ADP8860/ADP8861/ADP8863 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on I2C
select NEW_LEDS
select LEDS_CLASS
help
@@ -335,7 +328,7 @@ config BACKLIGHT_ADP8860
config BACKLIGHT_ADP8870
tristate "Backlight Driver for ADP8870 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on I2C
select NEW_LEDS
select LEDS_CLASS
help
@@ -353,28 +346,28 @@ config BACKLIGHT_88PM860X
config BACKLIGHT_PCF50633
tristate "Backlight driver for NXP PCF50633 MFD"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_PCF50633
+ depends on MFD_PCF50633
help
If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
enable its driver.
config BACKLIGHT_AAT2870
tristate "AnalogicTech AAT2870 Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
+ depends on MFD_AAT2870_CORE
help
If you have a AnalogicTech AAT2870 say Y to enable the
backlight driver.
config BACKLIGHT_LM3630A
tristate "Backlight Driver for LM3630A"
- depends on BACKLIGHT_CLASS_DEVICE && I2C && PWM
+ depends on I2C && PWM
select REGMAP_I2C
help
This supports TI LM3630A Backlight Driver
config BACKLIGHT_LM3639
tristate "Backlight Driver for LM3639"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on I2C
select REGMAP_I2C
select NEW_LEDS
select LEDS_CLASS
@@ -383,20 +376,20 @@ config BACKLIGHT_LM3639
config BACKLIGHT_LP855X
tristate "Backlight driver for TI LP855X"
- depends on BACKLIGHT_CLASS_DEVICE && I2C && PWM
+ depends on I2C && PWM
help
This supports TI LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
LP8557 backlight driver.
config BACKLIGHT_LP8788
tristate "Backlight driver for TI LP8788 MFD"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788 && PWM
+ depends on MFD_LP8788 && PWM
help
This supports TI LP8788 backlight driver.
config BACKLIGHT_OT200
tristate "Backlight driver for ot200 visualisation device"
- depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535
+ depends on CS5535_MFGPT && GPIO_CS5535
help
To compile this driver as a module, choose M here: the module will be
called ot200_bl.
@@ -410,7 +403,7 @@ config BACKLIGHT_PANDORA
config BACKLIGHT_SKY81452
tristate "Backlight driver for SKY81452"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_SKY81452
+ depends on MFD_SKY81452
help
If you have a Skyworks SKY81452, say Y to enable the
backlight driver.
@@ -420,14 +413,14 @@ config BACKLIGHT_SKY81452
config BACKLIGHT_TPS65217
tristate "TPS65217 Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_TPS65217
+ depends on MFD_TPS65217
help
If you have a Texas Instruments TPS65217 say Y to enable the
backlight driver.
config BACKLIGHT_AS3711
tristate "AS3711 Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_AS3711
+ depends on MFD_AS3711
help
If you have an Austrian Microsystems AS3711 say Y to enable the
backlight driver.
@@ -466,4 +459,4 @@ config BACKLIGHT_RAVE_SP
endif # BACKLIGHT_CLASS_DEVICE
-endif # BACKLIGHT_LCD_SUPPORT
+endmenu
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 2030a6b77a09..75d996490cf0 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -35,6 +35,14 @@
#define REG_MAX 0x50
#define INT_DEBOUNCE_MSEC 10
+
+#define LM3630A_BANK_0 0
+#define LM3630A_BANK_1 1
+
+#define LM3630A_NUM_SINKS 2
+#define LM3630A_SINK_0 0
+#define LM3630A_SINK_1 1
+
struct lm3630a_chip {
struct device *dev;
struct delayed_work work;
@@ -201,7 +209,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
if (ret < 0)
goto out_i2c_err;
- return bl->props.brightness;
+ return 0;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access\n");
@@ -278,7 +286,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
if (ret < 0)
goto out_i2c_err;
- return bl->props.brightness;
+ return 0;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
@@ -329,15 +337,17 @@ static const struct backlight_ops lm3630a_bank_b_ops = {
static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
{
- struct backlight_properties props;
struct lm3630a_platform_data *pdata = pchip->pdata;
+ struct backlight_properties props;
+ const char *label;
props.type = BACKLIGHT_RAW;
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
props.brightness = pdata->leda_init_brt;
props.max_brightness = pdata->leda_max_brt;
+ label = pdata->leda_label ? pdata->leda_label : "lm3630a_leda";
pchip->bleda =
- devm_backlight_device_register(pchip->dev, "lm3630a_leda",
+ devm_backlight_device_register(pchip->dev, label,
pchip->dev, pchip,
&lm3630a_bank_a_ops, &props);
if (IS_ERR(pchip->bleda))
@@ -348,8 +358,9 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
(pdata->ledb_ctrl != LM3630A_LEDB_ON_A)) {
props.brightness = pdata->ledb_init_brt;
props.max_brightness = pdata->ledb_max_brt;
+ label = pdata->ledb_label ? pdata->ledb_label : "lm3630a_ledb";
pchip->bledb =
- devm_backlight_device_register(pchip->dev, "lm3630a_ledb",
+ devm_backlight_device_register(pchip->dev, label,
pchip->dev, pchip,
&lm3630a_bank_b_ops, &props);
if (IS_ERR(pchip->bledb))
@@ -364,6 +375,123 @@ static const struct regmap_config lm3630a_regmap = {
.max_register = REG_MAX,
};
+static int lm3630a_parse_led_sources(struct fwnode_handle *node,
+ int default_led_sources)
+{
+ u32 sources[LM3630A_NUM_SINKS];
+ int ret, num_sources, i;
+
+ num_sources = fwnode_property_read_u32_array(node, "led-sources", NULL,
+ 0);
+ if (num_sources < 0)
+ return default_led_sources;
+ else if (num_sources > ARRAY_SIZE(sources))
+ return -EINVAL;
+
+ ret = fwnode_property_read_u32_array(node, "led-sources", sources,
+ num_sources);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_sources; i++) {
+ if (sources[i] < LM3630A_SINK_0 || sources[i] > LM3630A_SINK_1)
+ return -EINVAL;
+
+ ret |= BIT(sources[i]);
+ }
+
+ return ret;
+}
+
+static int lm3630a_parse_bank(struct lm3630a_platform_data *pdata,
+ struct fwnode_handle *node, int *seen_led_sources)
+{
+ int led_sources, ret;
+ const char *label;
+ u32 bank, val;
+ bool linear;
+
+ ret = fwnode_property_read_u32(node, "reg", &bank);
+ if (ret)
+ return ret;
+
+ if (bank < LM3630A_BANK_0 || bank > LM3630A_BANK_1)
+ return -EINVAL;
+
+ led_sources = lm3630a_parse_led_sources(node, BIT(bank));
+ if (led_sources < 0)
+ return led_sources;
+
+ if (*seen_led_sources & led_sources)
+ return -EINVAL;
+
+ *seen_led_sources |= led_sources;
+
+ linear = fwnode_property_read_bool(node,
+ "ti,linear-mapping-mode");
+ if (bank) {
+ if (led_sources & BIT(LM3630A_SINK_0) ||
+ !(led_sources & BIT(LM3630A_SINK_1)))
+ return -EINVAL;
+
+ pdata->ledb_ctrl = linear ?
+ LM3630A_LEDB_ENABLE_LINEAR :
+ LM3630A_LEDB_ENABLE;
+ } else {
+ if (!(led_sources & BIT(LM3630A_SINK_0)))
+ return -EINVAL;
+
+ pdata->leda_ctrl = linear ?
+ LM3630A_LEDA_ENABLE_LINEAR :
+ LM3630A_LEDA_ENABLE;
+
+ if (led_sources & BIT(LM3630A_SINK_1))
+ pdata->ledb_ctrl = LM3630A_LEDB_ON_A;
+ }
+
+ ret = fwnode_property_read_string(node, "label", &label);
+ if (!ret) {
+ if (bank)
+ pdata->ledb_label = label;
+ else
+ pdata->leda_label = label;
+ }
+
+ ret = fwnode_property_read_u32(node, "default-brightness",
+ &val);
+ if (!ret) {
+ if (bank)
+ pdata->ledb_init_brt = val;
+ else
+ pdata->leda_init_brt = val;
+ }
+
+ ret = fwnode_property_read_u32(node, "max-brightness", &val);
+ if (!ret) {
+ if (bank)
+ pdata->ledb_max_brt = val;
+ else
+ pdata->leda_max_brt = val;
+ }
+
+ return 0;
+}
+
+static int lm3630a_parse_node(struct lm3630a_chip *pchip,
+ struct lm3630a_platform_data *pdata)
+{
+ int ret = -ENODEV, seen_led_sources = 0;
+ struct fwnode_handle *node;
+
+ device_for_each_child_node(pchip->dev, node) {
+ ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int lm3630a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -396,13 +524,18 @@ static int lm3630a_probe(struct i2c_client *client,
GFP_KERNEL);
if (pdata == NULL)
return -ENOMEM;
+
/* default values */
- pdata->leda_ctrl = LM3630A_LEDA_ENABLE;
- pdata->ledb_ctrl = LM3630A_LEDB_ENABLE;
pdata->leda_max_brt = LM3630A_MAX_BRIGHTNESS;
pdata->ledb_max_brt = LM3630A_MAX_BRIGHTNESS;
pdata->leda_init_brt = LM3630A_MAX_BRIGHTNESS;
pdata->ledb_init_brt = LM3630A_MAX_BRIGHTNESS;
+
+ rval = lm3630a_parse_node(pchip, pdata);
+ if (rval) {
+ dev_err(&client->dev, "fail : parse node\n");
+ return rval;
+ }
}
pchip->pdata = pdata;
@@ -470,11 +603,17 @@ static const struct i2c_device_id lm3630a_id[] = {
{}
};
+static const struct of_device_id lm3630a_match_table[] = {
+ { .compatible = "ti,lm3630a", },
+ { },
+};
+
MODULE_DEVICE_TABLE(i2c, lm3630a_id);
static struct i2c_driver lm3630a_i2c_driver = {
.driver = {
.name = LM3630A_NAME,
+ .of_match_table = lm3630a_match_table,
},
.probe = lm3630a_probe,
.remove = lm3630a_remove,
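
The lm3630a changes above read per-bank configuration from firmware-node children using device_for_each_child_node() and the fwnode_property_*() helpers, which work for both DT and ACPI descriptions. A minimal sketch of that parsing style; the "reg" and "default-brightness" property names come from the hunks above, while the function name and fallback value are placeholders.

#include <linux/device.h>
#include <linux/property.h>

static int example_parse_banks(struct device *dev)
{
	struct fwnode_handle *child;
	u32 bank, brightness;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &bank);
		if (ret) {
			fwnode_handle_put(child);	/* drop the loop's reference */
			return ret;
		}

		/* Optional property: keep a default when it is absent. */
		if (fwnode_property_read_u32(child, "default-brightness",
					     &brightness))
			brightness = 255;

		dev_dbg(dev, "bank %u, default brightness %u\n",
			bank, brightness);
	}

	return 0;
}
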
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 53b8ceea9bde..fb45f866b923 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -155,21 +155,6 @@ static const struct backlight_ops pwm_backlight_ops = {
#ifdef CONFIG_OF
#define PWM_LUMINANCE_SCALE 10000 /* luminance scale */
-/* An integer based power function */
-static u64 int_pow(u64 base, int exp)
-{
- u64 result = 1;
-
- while (exp) {
- if (exp & 1)
- result *= base;
- exp >>= 1;
- base *= base;
- }
-
- return result;
-}
-
/*
* CIE lightness to PWM conversion.
*
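
The pwm_bl hunk above drops the driver's private integer power helper because an equivalent generic int_pow() is now provided by the kernel's library code. For reference, a standalone sketch of the same exponentiation-by-squaring technique, under a placeholder name:

#include <linux/types.h>

static u64 example_int_pow(u64 base, unsigned int exp)
{
	u64 result = 1;

	while (exp) {
		if (exp & 1)		/* odd bit set: multiply it in */
			result *= base;
		exp >>= 1;		/* move to the next exponent bit */
		base *= base;		/* square the base each round */
	}

	return result;
}
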
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 58a9590c9db6..bf6b77b964f1 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -45,25 +45,25 @@ menuconfig FB
device-aware may cause unexpected results. If unsure, say N.
config FIRMWARE_EDID
- bool "Enable firmware EDID"
- depends on FB
- ---help---
- This enables access to the EDID transferred from the firmware.
- On the i386, this is from the Video BIOS. Enable this if DDC/I2C
- transfers do not work for your driver and if you are using
- nvidiafb, i810fb or savagefb.
-
- In general, choosing Y for this option is safe. If you
- experience extremely long delays while booting before you get
- something on your display, try setting this to N. Matrox cards in
- combination with certain motherboards and monitors are known to
- suffer from this problem.
+ bool "Enable firmware EDID"
+ depends on FB
+ ---help---
+ This enables access to the EDID transferred from the firmware.
+ On the i386, this is from the Video BIOS. Enable this if DDC/I2C
+ transfers do not work for your driver and if you are using
+ nvidiafb, i810fb or savagefb.
+
+ In general, choosing Y for this option is safe. If you
+ experience extremely long delays while booting before you get
+ something on your display, try setting this to N. Matrox cards in
+ combination with certain motherboards and monitors are known to
+ suffer from this problem.
config FB_DDC
- tristate
- depends on FB
- select I2C_ALGOBIT
- select I2C
+ tristate
+ depends on FB
+ select I2C_ALGOBIT
+ select I2C
config FB_BOOT_VESA_SUPPORT
bool
@@ -160,8 +160,8 @@ config FB_LITTLE_ENDIAN
endchoice
config FB_SYS_FOPS
- tristate
- depends on FB
+ tristate
+ depends on FB
config FB_DEFERRED_IO
bool
@@ -180,41 +180,40 @@ config FB_SVGALIB
cards.
config FB_MACMODES
- tristate
- depends on FB
+ tristate
+ depends on FB
config FB_BACKLIGHT
tristate
depends on FB
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
config FB_MODE_HELPERS
- bool "Enable Video Mode Handling Helpers"
- depends on FB
+ bool "Enable Video Mode Handling Helpers"
+ depends on FB
---help---
This enables functions for handling video modes using the
Generalized Timing Formula and the EDID parser. A few drivers rely
- on this feature such as the radeonfb, rivafb, and the i810fb. If
+ on this feature such as the radeonfb, rivafb, and the i810fb. If
your driver does not take advantage of this feature, choosing Y will
just increase the kernel size by about 5K.
config FB_TILEBLITTING
- bool "Enable Tile Blitting Support"
- depends on FB
- ---help---
- This enables tile blitting. Tile blitting is a drawing technique
- where the screen is divided into rectangular sections (tiles), whereas
- the standard blitting divides the screen into pixels. Because the
- default drawing element is a tile, drawing functions will be passed
- parameters in terms of number of tiles instead of number of pixels.
- For example, to draw a single character, instead of using bitmaps,
- an index to an array of bitmaps will be used. To clear or move a
- rectangular section of a screen, the rectangle will be described in
- terms of number of tiles in the x- and y-axis.
-
- This is particularly important to one driver, matroxfb. If
- unsure, say N.
+ bool "Enable Tile Blitting Support"
+ depends on FB
+ ---help---
+ This enables tile blitting. Tile blitting is a drawing technique
+ where the screen is divided into rectangular sections (tiles), whereas
+ the standard blitting divides the screen into pixels. Because the
+ default drawing element is a tile, drawing functions will be passed
+ parameters in terms of number of tiles instead of number of pixels.
+ For example, to draw a single character, instead of using bitmaps,
+ an index to an array of bitmaps will be used. To clear or move a
+ rectangular section of a screen, the rectangle will be described in
+ terms of number of tiles in the x- and y-axis.
+
+ This is particularly important to one driver, matroxfb. If
+ unsure, say N.
comment "Frame buffer hardware drivers"
depends on FB
@@ -226,7 +225,7 @@ config FB_GRVGA
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
- This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
+ This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
config FB_CIRRUS
tristate "Cirrus Logic support"
@@ -281,7 +280,6 @@ config FB_ARMCLCD
select FB_CFB_IMAGEBLIT
select FB_MODE_HELPERS if OF
select VIDEOMODE_HELPERS if OF
- select BACKLIGHT_LCD_SUPPORT if OF
select BACKLIGHT_CLASS_DEVICE if OF
help
This framebuffer device driver is for the ARM PrimeCell PL110
@@ -293,14 +291,6 @@ config FB_ARMCLCD
here and read <file:Documentation/kbuild/modules.txt>. The module
will be called amba-clcd.
-# Helper logic selected only by the ARM Versatile platform family.
-config PLAT_VERSATILE_CLCD
- def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
- depends on ARM
- depends on FB_ARMCLCD && FB=y
- select REGMAP
- select MFD_SYSCON
-
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
@@ -315,7 +305,6 @@ config FB_ACORN
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
- select BACKLIGHT_LCD_SUPPORT
select FB_MODE_HELPERS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -343,7 +332,6 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
depends on FB && ARCH_MXC
- select BACKLIGHT_LCD_SUPPORT
select LCD_CLASS_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -655,17 +643,17 @@ config FB_EFI
using the EFI framebuffer as your console.
config FB_N411
- tristate "N411 Apollo/Hecuba devkit support"
- depends on FB && X86 && MMU
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
- select FB_HECUBA
- help
- This enables support for the Apollo display controller in its
- Hecuba form using the n411 devkit.
+ tristate "N411 Apollo/Hecuba devkit support"
+ depends on FB && X86 && MMU
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ select FB_HECUBA
+ help
+ This enables support for the Apollo display controller in its
+ Hecuba form using the n411 devkit.
config FB_HGA
tristate "Hercules mono graphics support"
@@ -685,7 +673,7 @@ config FB_GBE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- help
+ help
This is the frame buffer device driver for SGI Graphics Backend.
This chip is used in SGI O2 and Visual Workstation 320/540.
@@ -866,8 +854,8 @@ config FB_S1D13XXX
<http://vdc.epson.com/>
config FB_ATMEL
- tristate "AT91/AT32 LCD Controller support"
- depends on FB && HAVE_FB_ATMEL
+ tristate "AT91 LCD Controller support"
+ depends on FB && OF && HAVE_FB_ATMEL
select FB_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -875,7 +863,7 @@ config FB_ATMEL
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
- This enables support for the AT91/AT32 LCD Controller.
+ This enables support for the AT91 LCD Controller.
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
@@ -897,10 +885,10 @@ config FB_NVIDIA
module will be called nvidiafb.
config FB_NVIDIA_I2C
- bool "Enable DDC Support"
- depends on FB_NVIDIA
- select FB_DDC
- help
+ bool "Enable DDC Support"
+ depends on FB_NVIDIA
+ select FB_DDC
+ help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
allowing for robust video mode handling and switching.
@@ -943,10 +931,10 @@ config FB_RIVA
module will be called rivafb.
config FB_RIVA_I2C
- bool "Enable DDC Support"
- depends on FB_RIVA
- select FB_DDC
- help
+ bool "Enable DDC Support"
+ depends on FB_RIVA
+ select FB_DDC
+ help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
allowing for robust video mode handling and switching.
@@ -991,37 +979,37 @@ config FB_I810
select FB_CFB_IMAGEBLIT
select VGASTATE
help
- This driver supports the on-board graphics built in to the Intel 810
- and 815 chipsets. Say Y if you have and plan to use such a board.
+ This driver supports the on-board graphics built in to the Intel 810
+ and 815 chipsets. Say Y if you have and plan to use such a board.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called i810fb.
- For more information, please read
+ For more information, please read
<file:Documentation/fb/intel810.txt>
config FB_I810_GTF
bool "use VESA Generalized Timing Formula"
depends on FB_I810
help
- If you say Y, then the VESA standard, Generalized Timing Formula
- or GTF, will be used to calculate the required video timing values
- per video mode. Since the GTF allows nondiscrete timings
- (nondiscrete being a range of values as opposed to discrete being a
- set of values), you'll be able to use any combination of horizontal
+ If you say Y, then the VESA standard, Generalized Timing Formula
+ or GTF, will be used to calculate the required video timing values
+ per video mode. Since the GTF allows nondiscrete timings
+ (nondiscrete being a range of values as opposed to discrete being a
+ set of values), you'll be able to use any combination of horizontal
and vertical resolutions, and vertical refresh rates without having
to specify your own timing parameters. This is especially useful
- to maximize the performance of an aging display, or if you just
- have a display with nonstandard dimensions. A VESA compliant
+ to maximize the performance of an aging display, or if you just
+ have a display with nonstandard dimensions. A VESA compliant
monitor is recommended, but can still work with non-compliant ones.
- If you need or want this, then select this option. The timings may
- not be compliant with Intel's recommended values. Use at your own
+ If you need or want this, then select this option. The timings may
+ not be compliant with Intel's recommended values. Use at your own
risk.
- If you say N, the driver will revert to discrete video timings
+ If you say N, the driver will revert to discrete video timings
using a set recommended by Intel in their documentation.
-
- If unsure, say N.
+
+ If unsure, say N.
config FB_I810_I2C
bool "Enable DDC Support"
@@ -1060,8 +1048,8 @@ config FB_INTEL
depends on !DRM_I915
help
This driver supports the on-board graphics built in to the Intel
- 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
- Say Y if you have and plan to use such a board.
+ 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
+ Say Y if you have and plan to use such a board.
To make FB_INTEL=Y work you need to say AGP_INTEL=y too.
@@ -1142,10 +1130,10 @@ config FB_MATROX_G
G450/G550 secondary head and digital output are supported without
additional modules.
- The driver starts in monitor mode. You must use the matroxset tool
- (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
- swap primary and secondary head outputs, or to change output mode.
- Secondary head driver always start in 640x480 resolution and you
+ The driver starts in monitor mode. You must use the matroxset tool
+ (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
+ swap primary and secondary head outputs, or to change output mode.
+ Secondary head driver always start in 640x480 resolution and you
must use fbset to change it.
Do not forget that second head supports only 16 and 32 bpp
@@ -1228,7 +1216,7 @@ config FB_RADEON_I2C
select FB_DDC
default y
help
- Say Y here if you want DDC/I2C support for your Radeon board.
+ Say Y here if you want DDC/I2C support for your Radeon board.
config FB_RADEON_BACKLIGHT
bool "Support for backlight control"
@@ -1357,10 +1345,10 @@ config FB_SAVAGE
will be called savagefb.
config FB_SAVAGE_I2C
- bool "Enable DDC2 Support"
- depends on FB_SAVAGE
- select FB_DDC
- help
+ bool "Enable DDC2 Support"
+ depends on FB_SAVAGE
+ select FB_DDC
+ help
This enables I2C support for S3 Savage Chipsets. This is used
only for getting EDID information from the attached display
allowing for robust video mode handling and switching.
@@ -1370,12 +1358,12 @@ config FB_SAVAGE_I2C
here.
config FB_SAVAGE_ACCEL
- bool "Enable Console Acceleration"
- depends on FB_SAVAGE
- help
- This option will compile in console acceleration support. If
- the resulting framebuffer console has bothersome glitches, then
- choose N here.
+ bool "Enable Console Acceleration"
+ depends on FB_SAVAGE
+ help
+ This option will compile in console acceleration support. If
+ the resulting framebuffer console has bothersome glitches, then
+ choose N here.
config FB_SIS
tristate "SiS/XGI display support"
@@ -1408,17 +1396,17 @@ config FB_SIS_315
as XGI V3XT, V5, V8 and Z7.
config FB_VIA
- tristate "VIA UniChrome (Pro) and Chrome9 display support"
- depends on FB && PCI && GPIOLIB && I2C && (X86 || COMPILE_TEST)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select I2C_ALGOBIT
- help
+ tristate "VIA UniChrome (Pro) and Chrome9 display support"
+ depends on FB && PCI && GPIOLIB && I2C && (X86 || COMPILE_TEST)
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select I2C_ALGOBIT
+ help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
CN700/VN800,CX700/VX700,P4M890) and Chrome9 Family (K8M890,CN896
- /P4M900,VX800)
+ /P4M900,VX800)
Say Y if you have a VIA UniChrome graphics board.
To compile this driver as a module, choose M here: the
@@ -1455,7 +1443,7 @@ config FB_NEOMAGIC
select VGASTATE
help
This driver supports notebooks with NeoMagic PCI chips.
- Say Y if you have such a graphics card.
+ Say Y if you have such a graphics card.
To compile this driver as a module, choose M here: the
module will be called neofb.
@@ -1510,7 +1498,7 @@ config FB_VOODOO1
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
- Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
+ Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
Voodoo2 (cvg) based graphics card.
To compile this driver as a module, choose M here: the
@@ -1679,9 +1667,9 @@ config FB_HIT
config FB_PMAG_AA
tristate "PMAG-AA TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
used mainly in the MIPS-based DECstation series.
@@ -1689,9 +1677,9 @@ config FB_PMAG_AA
config FB_PMAG_BA
tristate "PMAG-BA TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
used mainly in the MIPS-based DECstation series.
@@ -1699,9 +1687,9 @@ config FB_PMAG_BA
config FB_PMAGB_B
tristate "PMAGB-B TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the PMAGB-B TURBOchannel framebuffer card used mainly
in the MIPS-based DECstation series. The card is currently only
@@ -1710,9 +1698,9 @@ config FB_PMAGB_B
config FB_MAXINE
bool "Maxine (Personal DECstation) onboard framebuffer support"
depends on (FB = y) && MACH_DECSTATION
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Support for the onboard framebuffer (1024x768x8) in the Personal
DECstation series (Personal DECstation 5000/20, /25, /33, /50,
@@ -1721,9 +1709,9 @@ config FB_MAXINE
config FB_G364
bool "G364 frame buffer support"
depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
The G364 driver is the framebuffer used in MIPS Magnum 4000 and
Olivetti M700-10 systems.
@@ -1731,9 +1719,9 @@ config FB_G364
config FB_68328
bool "Motorola 68328 native frame buffer support"
depends on (FB = y) && (M68328 || M68EZ328 || M68VZ328)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
Say Y here if you want to support the built-in frame buffer of
the Motorola 68328 CPU family.
@@ -1812,13 +1800,13 @@ config FB_MBX
Accelerator
config FB_MBX_DEBUG
- bool "Enable debugging info via debugfs"
- depends on FB_MBX && DEBUG_FS
- ---help---
- Enable this if you want debugging information using the debug
- filesystem (debugfs)
+ bool "Enable debugging info via debugfs"
+ depends on FB_MBX && DEBUG_FS
+ ---help---
+ Enable this if you want debugging information using the debug
+ filesystem (debugfs)
- If unsure, say N.
+ If unsure, say N.
config FB_FSL_DIU
tristate "Freescale DIU framebuffer support"
@@ -1834,9 +1822,9 @@ config FB_FSL_DIU
config FB_W100
tristate "W100 frame buffer support"
depends on FB && ARCH_PXA
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
---help---
Frame buffer driver for the w100 as found on the Sharp SL-Cxx series.
It can also drive the w3220 chip found on iPAQ hx4700.
@@ -1901,10 +1889,10 @@ config FB_S3C
Currently the support is only for the S3C6400 and S3C6410 SoCs.
config FB_S3C_DEBUG_REGWRITE
- bool "Debug register writes"
- depends on FB_S3C
- ---help---
- Show all register writes via pr_debug()
+ bool "Debug register writes"
+ depends on FB_S3C
+ ---help---
+ Show all register writes via pr_debug()
config FB_S3C2410
tristate "S3C2410 LCD framebuffer support"
@@ -1930,18 +1918,18 @@ config FB_S3C2410_DEBUG
through sysfs
config FB_NUC900
- tristate "NUC900 LCD framebuffer support"
- depends on FB && ARCH_W90X900
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- ---help---
- Frame buffer driver for the built-in LCD controller in the Nuvoton
- NUC900 processor
+ tristate "NUC900 LCD framebuffer support"
+ depends on FB && ARCH_W90X900
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ Frame buffer driver for the built-in LCD controller in the Nuvoton
+ NUC900 processor
config GPM1040A0_320X240
- bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
- depends on FB_NUC900
+ bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
+ depends on FB_NUC900
config FB_SM501
tristate "Silicon Motion SM501 framebuffer support"
@@ -2183,7 +2171,7 @@ config FB_EP93XX
config FB_PRE_INIT_FB
bool "Don't reinitialize, use bootloader's GDC/Display configuration"
- depends on FB && FB_MB862XX_LIME
+ depends on FB && (FB_MB862XX_LIME || FB_MXS)
---help---
Select this option if display contents should be inherited as set by
the bootloader.
@@ -2192,7 +2180,6 @@ config FB_MX3
tristate "MX3 Framebuffer support"
depends on FB && MX3_IPU
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 846b0c9ea9db..655f2537cac1 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -76,8 +76,6 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
-obj-$(CONFIG_ARCH_NOMADIK) += amba-clcd-nomadik.o
-obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o
obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.c b/drivers/video/fbdev/amba-clcd-nomadik.c
deleted file mode 100644
index cd2db1113e67..000000000000
--- a/drivers/video/fbdev/amba-clcd-nomadik.c
+++ /dev/null
@@ -1,251 +0,0 @@
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-#include "amba-clcd-nomadik.h"
-
-static struct gpio_desc *grestb;
-static struct gpio_desc *scen;
-static struct gpio_desc *scl;
-static struct gpio_desc *sda;
-
-static u8 tpg110_readwrite_reg(bool write, u8 address, u8 outval)
-{
- int i;
- u8 inval = 0;
-
- /* Assert SCEN */
- gpiod_set_value_cansleep(scen, 1);
- ndelay(150);
- /* Hammer out the address */
- for (i = 5; i >= 0; i--) {
- if (address & BIT(i))
- gpiod_set_value_cansleep(sda, 1);
- else
- gpiod_set_value_cansleep(sda, 0);
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
- }
-
- if (write) {
- /* WRITE */
- gpiod_set_value_cansleep(sda, 0);
- } else {
- /* READ */
- gpiod_set_value_cansleep(sda, 1);
- }
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
-
- if (!write)
- /* HiZ turn-around cycle */
- gpiod_direction_input(sda);
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
-
- /* Hammer in/out the data */
- for (i = 7; i >= 0; i--) {
- int value;
-
- if (write) {
- value = !!(outval & BIT(i));
- gpiod_set_value_cansleep(sda, value);
- } else {
- value = gpiod_get_value(sda);
- if (value)
- inval |= BIT(i);
- }
- ndelay(150);
- /* Send an SCL pulse */
- gpiod_set_value_cansleep(scl, 1);
- ndelay(160);
- gpiod_set_value_cansleep(scl, 0);
- ndelay(160);
- }
-
- gpiod_direction_output(sda, 0);
- /* Deassert SCEN */
- gpiod_set_value_cansleep(scen, 0);
- /* Satisfies SCEN pulse width */
- udelay(1);
-
- return inval;
-}
-
-static u8 tpg110_read_reg(u8 address)
-{
- return tpg110_readwrite_reg(false, address, 0);
-}
-
-static void tpg110_write_reg(u8 address, u8 outval)
-{
- tpg110_readwrite_reg(true, address, outval);
-}
-
-static void tpg110_startup(struct device *dev)
-{
- u8 val;
-
- dev_info(dev, "TPG110 display enable\n");
- /* De-assert the reset signal */
- gpiod_set_value_cansleep(grestb, 0);
- mdelay(1);
- dev_info(dev, "de-asserted GRESTB\n");
-
- /* Test display communication */
- tpg110_write_reg(0x00, 0x55);
- val = tpg110_read_reg(0x00);
- if (val == 0x55)
- dev_info(dev, "passed communication test\n");
- val = tpg110_read_reg(0x01);
- dev_info(dev, "TPG110 chip ID: %d version: %d\n",
- val>>4, val&0x0f);
-
- /* Show display resolution */
- val = tpg110_read_reg(0x02);
- val &= 7;
- switch (val) {
- case 0x0:
- dev_info(dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)");
- break;
- case 0x1:
- dev_info(dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)");
- break;
- case 0x4:
- dev_info(dev, "480x640 RGB");
- break;
- case 0x5:
- dev_info(dev, "480x272 RGB");
- break;
- case 0x6:
- dev_info(dev, "640x480 RGB");
- break;
- case 0x7:
- dev_info(dev, "800x480 RGB");
- break;
- default:
- dev_info(dev, "ILLEGAL RESOLUTION");
- break;
- }
-
- val = tpg110_read_reg(0x03);
- dev_info(dev, "resolution is controlled by %s\n",
- (val & BIT(7)) ? "software" : "hardware");
-}
-
-static void tpg110_enable(struct clcd_fb *fb)
-{
- struct device *dev = &fb->dev->dev;
- static bool startup;
- u8 val;
-
- if (!startup) {
- tpg110_startup(dev);
- startup = true;
- }
-
- /* Take chip out of standby */
- val = tpg110_read_reg(0x03);
- val |= BIT(0);
- tpg110_write_reg(0x03, val);
-}
-
-static void tpg110_disable(struct clcd_fb *fb)
-{
- u8 val;
-
- dev_info(&fb->dev->dev, "TPG110 display disable\n");
- val = tpg110_read_reg(0x03);
- /* Put into standby */
- val &= ~BIT(0);
- tpg110_write_reg(0x03, val);
-}
-
-static void tpg110_init(struct device *dev, struct device_node *np,
- struct clcd_board *board)
-{
- dev_info(dev, "TPG110 display init\n");
-
- /* This asserts the GRESTB signal, putting the display into reset */
- grestb = devm_fwnode_get_gpiod_from_child(dev, "grestb", &np->fwnode,
- GPIOD_OUT_HIGH, "grestb");
- if (IS_ERR(grestb)) {
- dev_err(dev, "no GRESTB GPIO\n");
- return;
- }
- scen = devm_fwnode_get_gpiod_from_child(dev, "scen", &np->fwnode,
- GPIOD_OUT_LOW, "scen");
- if (IS_ERR(scen)) {
- dev_err(dev, "no SCEN GPIO\n");
- return;
- }
- scl = devm_fwnode_get_gpiod_from_child(dev, "scl", &np->fwnode,
- GPIOD_OUT_LOW, "scl");
- if (IS_ERR(scl)) {
- dev_err(dev, "no SCL GPIO\n");
- return;
- }
- sda = devm_fwnode_get_gpiod_from_child(dev, "sda", &np->fwnode,
- GPIOD_OUT_LOW, "sda");
- if (IS_ERR(sda)) {
- dev_err(dev, "no SDA GPIO\n");
- return;
- }
- board->enable = tpg110_enable;
- board->disable = tpg110_disable;
-}
-
-int nomadik_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel)
-{
- if (of_device_is_compatible(panel, "tpo,tpg110"))
- tpg110_init(&fb->dev->dev, panel, fb->board);
- else
- dev_info(&fb->dev->dev, "unknown panel\n");
-
- /* Unknown panel, fall through */
- return 0;
-}
-EXPORT_SYMBOL_GPL(nomadik_clcd_init_panel);
-
-#define PMU_CTRL_OFFSET 0x0000
-#define PMU_CTRL_LCDNDIF BIT(26)
-
-int nomadik_clcd_init_board(struct amba_device *adev,
- struct clcd_board *board)
-{
- struct regmap *pmu_regmap;
-
- dev_info(&adev->dev, "Nomadik CLCD board init\n");
- pmu_regmap =
- syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
- if (IS_ERR(pmu_regmap)) {
- dev_err(&adev->dev, "could not find PMU syscon regmap\n");
- return PTR_ERR(pmu_regmap);
- }
- regmap_update_bits(pmu_regmap,
- PMU_CTRL_OFFSET,
- PMU_CTRL_LCDNDIF,
- 0);
- dev_info(&adev->dev, "set PMU mux to CLCD mode\n");
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(nomadik_clcd_init_board);
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h
deleted file mode 100644
index 462c31381fa1..000000000000
--- a/drivers/video/fbdev/amba-clcd-nomadik.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AMBA_CLCD_NOMADIK_H
-#define _AMBA_CLCD_NOMADIK_H
-
-#include <linux/amba/bus.h>
-
-#ifdef CONFIG_ARCH_NOMADIK
-int nomadik_clcd_init_board(struct amba_device *adev,
- struct clcd_board *board);
-int nomadik_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel);
-#else
-static inline int nomadik_clcd_init_board(struct amba_device *adev,
- struct clcd_board *board)
-{
- return 0;
-}
-static inline int nomadik_clcd_init_panel(struct clcd_fb *fb,
- struct device_node *panel)
-{
- return 0;
-}
-#endif
-
-#endif /* inclusion guard */
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
deleted file mode 100644
index d42047dc4e4e..000000000000
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ /dev/null
@@ -1,567 +0,0 @@
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/platform_data/video-clcd-versatile.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/bitops.h>
-#include "amba-clcd-versatile.h"
-
-static struct clcd_panel vga = {
- .mode = {
- .name = "VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
- .bpp = 16,
-};
-
-static struct clcd_panel xvga = {
- .mode = {
- .name = "XVGA",
- .refresh = 60,
- .xres = 1024,
- .yres = 768,
- .pixclock = 15748,
- .left_margin = 152,
- .right_margin = 48,
- .upper_margin = 23,
- .lower_margin = 3,
- .hsync_len = 104,
- .vsync_len = 4,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
- .bpp = 16,
-};
-
-/* Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT */
-static struct clcd_panel sanyo_tm38qv67a02a = {
- .mode = {
- .name = "Sanyo TM38QV67A02A",
- .refresh = 116,
- .xres = 320,
- .yres = 240,
- .pixclock = 100000,
- .left_margin = 6,
- .right_margin = 6,
- .upper_margin = 5,
- .lower_margin = 5,
- .hsync_len = 6,
- .vsync_len = 6,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .bpp = 16,
-};
-
-static struct clcd_panel sanyo_2_5_in = {
- .mode = {
- .name = "Sanyo QVGA Portrait",
- .refresh = 116,
- .xres = 240,
- .yres = 320,
- .pixclock = 100000,
- .left_margin = 20,
- .right_margin = 10,
- .upper_margin = 2,
- .lower_margin = 2,
- .hsync_len = 10,
- .vsync_len = 2,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .bpp = 16,
-};
-
-/* Epson L2F50113T00 - 2.2 inch 176x220 Color TFT */
-static struct clcd_panel epson_l2f50113t00 = {
- .mode = {
- .name = "Epson L2F50113T00",
- .refresh = 390,
- .xres = 176,
- .yres = 220,
- .pixclock = 62500,
- .left_margin = 3,
- .right_margin = 2,
- .upper_margin = 1,
- .lower_margin = 0,
- .hsync_len = 3,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .bpp = 16,
-};
-
-static struct clcd_panel *panels[] = {
- &vga,
- &xvga,
- &sanyo_tm38qv67a02a,
- &sanyo_2_5_in,
- &epson_l2f50113t00,
-};
-
-struct clcd_panel *versatile_clcd_get_panel(const char *name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(panels); i++)
- if (strcmp(panels[i]->mode.name, name) == 0)
- break;
-
- if (i < ARRAY_SIZE(panels))
- return panels[i];
-
- pr_err("CLCD: couldn't get parameters for panel %s\n", name);
-
- return NULL;
-}
-
-int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
-{
- dma_addr_t dma;
-
- fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, framesize, &dma,
- GFP_KERNEL);
- if (!fb->fb.screen_base) {
- pr_err("CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-}
-
-int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
-
-void versatile_clcd_remove_dma(struct clcd_fb *fb)
-{
- dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
- fb->fb.fix.smem_start);
-}
-
-#ifdef CONFIG_OF
-
-static struct regmap *versatile_syscon_map;
-static struct regmap *versatile_ib2_map;
-
-/*
- * We detect the different syscon types from the compatible strings.
- */
-enum versatile_clcd {
- INTEGRATOR_CLCD_CM,
- VERSATILE_CLCD,
- REALVIEW_CLCD_EB,
- REALVIEW_CLCD_PB1176,
- REALVIEW_CLCD_PB11MP,
- REALVIEW_CLCD_PBA8,
- REALVIEW_CLCD_PBX,
-};
-
-static const struct of_device_id versatile_clcd_of_match[] = {
- {
- .compatible = "arm,core-module-integrator",
- .data = (void *)INTEGRATOR_CLCD_CM,
- },
- {
- .compatible = "arm,versatile-sysreg",
- .data = (void *)VERSATILE_CLCD,
- },
- {
- .compatible = "arm,realview-eb-syscon",
- .data = (void *)REALVIEW_CLCD_EB,
- },
- {
- .compatible = "arm,realview-pb1176-syscon",
- .data = (void *)REALVIEW_CLCD_PB1176,
- },
- {
- .compatible = "arm,realview-pb11mp-syscon",
- .data = (void *)REALVIEW_CLCD_PB11MP,
- },
- {
- .compatible = "arm,realview-pba8-syscon",
- .data = (void *)REALVIEW_CLCD_PBA8,
- },
- {
- .compatible = "arm,realview-pbx-syscon",
- .data = (void *)REALVIEW_CLCD_PBX,
- },
- {},
-};
-
-/*
- * Core module CLCD control on the Integrator/CP, bits
- * 8 thru 19 of the CM_CONTROL register controls a bunch
- * of CLCD settings.
- */
-#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
-#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
-#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
-#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
-/* Bits 11,12,13 controls the LCD type */
-#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13))
-#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
-#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12)
-#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
-#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
-#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
-#define INTEGRATOR_CLCD_LCD0_EN BIT(14)
-#define INTEGRATOR_CLCD_LCD1_EN BIT(15)
-/* R/L flip on Sharp */
-#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16)
-/* U/D flip on Sharp */
-#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17)
-/* No connection on Sharp */
-#define INTEGRATOR_CLCD_LCD_STATIC BIT(18)
-/* 0 = 24bit VGA, 1 = 18bit VGA */
-#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
-
-#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \
- INTEGRATOR_CLCD_LCDBIASUP | \
- INTEGRATOR_CLCD_LCDBIASDN | \
- INTEGRATOR_CLCD_LCDMUX_MASK | \
- INTEGRATOR_CLCD_LCD0_EN | \
- INTEGRATOR_CLCD_LCD1_EN | \
- INTEGRATOR_CLCD_LCD_STATIC1 | \
- INTEGRATOR_CLCD_LCD_STATIC2 | \
- INTEGRATOR_CLCD_LCD_STATIC | \
- INTEGRATOR_CLCD_LCD_N24BITEN)
-
-static void integrator_clcd_enable(struct clcd_fb *fb)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val;
-
- dev_info(&fb->dev->dev, "enable Integrator CLCD connectors\n");
-
- /* FIXME: really needed? */
- val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
- INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;
- if (var->bits_per_pixel <= 8 ||
- (var->bits_per_pixel == 16 && var->green.length == 5))
- /* Pseudocolor, RGB555, BGR555 */
- val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
- else if (fb->fb.var.bits_per_pixel <= 16)
- /* truecolor RGB565 */
- val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
- else
- val = 0; /* no idea for this, don't trust the docs */
-
- regmap_update_bits(versatile_syscon_map,
- INTEGRATOR_HDR_CTRL_OFFSET,
- INTEGRATOR_CLCD_MASK,
- val);
-}
-
-/*
- * This configuration register in the Versatile and RealView
- * family is uniformly present but appears more and more
- * unutilized starting with the RealView series.
- */
-#define SYS_CLCD 0x50
-#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1))
-#define SYS_CLCD_MODE_888 0
-#define SYS_CLCD_MODE_5551 BIT(0)
-#define SYS_CLCD_MODE_565_R_LSB BIT(1)
-#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1))
-#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5))
-#define SYS_CLCD_NLCDIOON BIT(2)
-#define SYS_CLCD_VDDPOSSWITCH BIT(3)
-#define SYS_CLCD_PWR3V5SWITCH BIT(4)
-#define SYS_CLCD_VDDNEGSWITCH BIT(5)
-#define SYS_CLCD_TSNSS BIT(6) /* touchscreen enable */
-#define SYS_CLCD_SSPEXP BIT(7) /* SSP expansion enable */
-
-/* The Versatile can detect the connected panel type */
-#define SYS_CLCD_CLCDID_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12))
-#define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8)
-#define SYS_CLCD_ID_SHARP_8_4 (0x01 << 8)
-#define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8)
-#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
-#define SYS_CLCD_ID_VGA (0x1f << 8)
-
-#define SYS_CLCD_TSNDAV BIT(13) /* data ready from TS */
-
-/* IB2 control register for the Versatile daughterboard */
-#define IB2_CTRL 0x00
-#define IB2_CTRL_LCD_SD BIT(1) /* 1 = shut down LCD */
-#define IB2_CTRL_LCD_BL_ON BIT(0)
-#define IB2_CTRL_LCD_MASK (BIT(0)|BIT(1))
-
-static void versatile_clcd_disable(struct clcd_fb *fb)
-{
- dev_info(&fb->dev->dev, "disable Versatile CLCD connectors\n");
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- 0);
-
- /* If we're on an IB2 daughterboard, turn off display */
- if (versatile_ib2_map) {
- dev_info(&fb->dev->dev, "disable IB2 display\n");
- regmap_update_bits(versatile_ib2_map,
- IB2_CTRL,
- IB2_CTRL_LCD_MASK,
- IB2_CTRL_LCD_SD);
- }
-}
-
-static void versatile_clcd_enable(struct clcd_fb *fb)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val = 0;
-
- dev_info(&fb->dev->dev, "enable Versatile CLCD connectors\n");
- switch (var->green.length) {
- case 5:
- val |= SYS_CLCD_MODE_5551;
- break;
- case 6:
- if (var->red.offset == 0)
- val |= SYS_CLCD_MODE_565_R_LSB;
- else
- val |= SYS_CLCD_MODE_565_B_LSB;
- break;
- case 8:
- val |= SYS_CLCD_MODE_888;
- break;
- }
-
- /* Set up the MUX */
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_MODE_MASK,
- val);
-
- /* Then enable the display */
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
-
- /* If we're on an IB2 daughterboard, turn on display */
- if (versatile_ib2_map) {
- dev_info(&fb->dev->dev, "enable IB2 display\n");
- regmap_update_bits(versatile_ib2_map,
- IB2_CTRL,
- IB2_CTRL_LCD_MASK,
- IB2_CTRL_LCD_BL_ON);
- }
-}
-
-static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- clcdfb_decode(fb, regs);
-
- /* Always clear BGR for RGB565: we do the routing externally */
- if (fb->fb.var.green.length == 6)
- regs->cntl &= ~CNTL_BGR;
-}
-
-static void realview_clcd_disable(struct clcd_fb *fb)
-{
- dev_info(&fb->dev->dev, "disable RealView CLCD connectors\n");
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- 0);
-}
-
-static void realview_clcd_enable(struct clcd_fb *fb)
-{
- dev_info(&fb->dev->dev, "enable RealView CLCD connectors\n");
- regmap_update_bits(versatile_syscon_map,
- SYS_CLCD,
- SYS_CLCD_CONNECTOR_MASK,
- SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
-}
-
-struct versatile_panel {
- u32 id;
- char *compatible;
- bool ib2;
-};
-
-static const struct versatile_panel versatile_panels[] = {
- {
- .id = SYS_CLCD_ID_VGA,
- .compatible = "VGA",
- },
- {
- .id = SYS_CLCD_ID_SANYO_3_8,
- .compatible = "sanyo,tm38qv67a02a",
- },
- {
- .id = SYS_CLCD_ID_SHARP_8_4,
- .compatible = "sharp,lq084v1dg21",
- },
- {
- .id = SYS_CLCD_ID_EPSON_2_2,
- .compatible = "epson,l2f50113t00",
- },
- {
- .id = SYS_CLCD_ID_SANYO_2_5,
- .compatible = "sanyo,alr252rgt",
- .ib2 = true,
- },
-};
-
-static void versatile_panel_probe(struct device *dev, struct device_node *panel)
-{
- struct versatile_panel const *vpanel = NULL;
- u32 val;
- int ret;
- int i;
-
- /*
- * The Versatile CLCD has a panel auto-detection mechanism.
- * We use this and look for the compatible panel in the
- * device tree.
- */
- ret = regmap_read(versatile_syscon_map, SYS_CLCD, &val);
- if (ret) {
- dev_err(dev, "cannot read CLCD syscon register\n");
- return;
- }
- val &= SYS_CLCD_CLCDID_MASK;
-
- /* First find corresponding panel information */
- for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) {
- vpanel = &versatile_panels[i];
-
- if (val == vpanel->id) {
- dev_err(dev, "autodetected panel \"%s\"\n",
- vpanel->compatible);
- break;
- }
- }
- if (i == ARRAY_SIZE(versatile_panels)) {
- dev_err(dev, "could not auto-detect panel\n");
- return;
- }
-
- if (!of_device_is_compatible(panel, vpanel->compatible))
- dev_err(dev, "panel in DT is not compatible with the "
- "auto-detected panel, continuing anyway\n");
-
- /*
- * If we have a Sanyo 2.5" port
- * that we're running on an IB2 and proceed to look for the
- * IB2 syscon regmap.
- */
- if (!vpanel->ib2)
- return;
-
- versatile_ib2_map = syscon_regmap_lookup_by_compatible(
- "arm,versatile-ib2-syscon");
- if (IS_ERR(versatile_ib2_map)) {
- dev_err(dev, "could not locate IB2 control register\n");
- versatile_ib2_map = NULL;
- return;
- }
-}
-
-int versatile_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel)
-{
- const struct of_device_id *clcd_id;
- enum versatile_clcd versatile_clcd_type;
- struct device_node *np;
- struct regmap *map;
- struct device *dev = &fb->dev->dev;
-
- np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
- &clcd_id);
- if (!np) {
- /* Vexpress does not have this */
- return 0;
- }
- versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
-
- map = syscon_node_to_regmap(np);
- if (IS_ERR(map)) {
- dev_err(dev, "no Versatile syscon regmap\n");
- return PTR_ERR(map);
- }
-
- switch (versatile_clcd_type) {
- case INTEGRATOR_CLCD_CM:
- versatile_syscon_map = map;
- fb->board->enable = integrator_clcd_enable;
- /* Override the caps, we have only these */
- fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 |
- CLCD_CAP_888;
- dev_info(dev, "set up callbacks for Integrator PL110\n");
- break;
- case VERSATILE_CLCD:
- versatile_syscon_map = map;
- fb->board->enable = versatile_clcd_enable;
- fb->board->disable = versatile_clcd_disable;
- fb->board->decode = versatile_clcd_decode;
- versatile_panel_probe(dev, panel);
- dev_info(dev, "set up callbacks for Versatile\n");
- break;
- case REALVIEW_CLCD_EB:
- case REALVIEW_CLCD_PB1176:
- case REALVIEW_CLCD_PB11MP:
- case REALVIEW_CLCD_PBA8:
- case REALVIEW_CLCD_PBX:
- versatile_syscon_map = map;
- fb->board->enable = realview_clcd_enable;
- fb->board->disable = realview_clcd_disable;
- dev_info(dev, "set up callbacks for RealView PL111\n");
- break;
- default:
- dev_info(dev, "unknown Versatile system controller\n");
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(versatile_clcd_init_panel);
-#endif
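
The removed versatile_panel_probe() above illustrates a reusable idiom: read a board-level ID field through a syscon regmap and match it against a table. A minimal stand-alone sketch of that idiom, assuming the SYS_CLCD layout defined above (ID field in bits 8..12); the compatible string and function name are placeholders, not from this driver:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_read_clcd_panel_id(u32 *id)
{
	struct regmap *map;
	u32 val;
	int ret;

	/* placeholder compatible string for the system controller node */
	map = syscon_regmap_lookup_by_compatible("arm,example-syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, 0x50 /* SYS_CLCD */, &val);
	if (ret)
		return ret;

	*id = (val >> 8) & 0x1f;	/* SYS_CLCD_CLCDID field, bits 8..12 */
	return 0;
}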
diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h
deleted file mode 100644
index b20baa47e6ad..000000000000
--- a/drivers/video/fbdev/amba-clcd-versatile.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Special local versatile callbacks
- */
-#include <linux/of.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_data/video-clcd-versatile.h>
-
-#if defined(CONFIG_PLAT_VERSATILE_CLCD) && defined(CONFIG_OF)
-int versatile_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel);
-#else
-static inline int versatile_clcd_init_panel(struct clcd_fb *fb,
- struct device_node *panel)
-{
- return 0;
-}
-#endif
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 38c1f324ce15..89324e42a033 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -30,9 +30,6 @@
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include "amba-clcd-nomadik.h"
-#include "amba-clcd-versatile.h"
-
#define to_clcd(info) container_of(info, struct clcd_fb, fb)
/* This is limited to 16 characters when displayed by X startup */
@@ -223,15 +220,6 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
var->blue.length = 4;
}
break;
- case 24:
- if (fb->vendor->packed_24_bit_pixels) {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- } else {
- ret = -EINVAL;
- }
- break;
case 32:
/* If we can't do 888, reject */
caps &= CLCD_CAP_888;
@@ -318,12 +306,6 @@ static int clcdfb_set_par(struct fb_info *info)
clcdfb_disable(fb);
- /* Some variants must be clocked here */
- if (fb->vendor->clock_timregs && !fb->clk_enabled) {
- fb->clk_enabled = true;
- clk_enable(fb->clk);
- }
-
writel(regs.tim0, fb->regs + CLCD_TIM0);
writel(regs.tim1, fb->regs + CLCD_TIM1);
writel(regs.tim2, fb->regs + CLCD_TIM2);
@@ -465,14 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
fb->off_ienb = CLCD_PL111_IENB;
fb->off_cntl = CLCD_PL111_CNTL;
} else {
- if (of_machine_is_compatible("arm,versatile-ab") ||
- of_machine_is_compatible("arm,versatile-pb")) {
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
- } else {
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
- }
+ fb->off_ienb = CLCD_PL110_IENB;
+ fb->off_cntl = CLCD_PL110_CNTL;
}
fb->clk = clk_get(&fb->dev->dev, NULL);
@@ -713,42 +689,6 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
if (r0 != 0 && b0 == 0)
fb->panel->bgr_connection = true;
- if (fb->panel->caps && fb->vendor->st_bitmux_control) {
- /*
- * Set up the special bits for the Nomadik control register
- * (other platforms tend to do this through an external
- * register).
- */
-
- /* Offset of the highest used color */
- int maxoff = max3(r0, g0, b0);
- /* Most significant bit out, highest used bit */
- int msb = 0;
-
- if (fb->panel->caps & CLCD_CAP_888) {
- msb = maxoff + 8 - 1;
- } else if (fb->panel->caps & CLCD_CAP_565) {
- msb = maxoff + 5 - 1;
- fb->panel->cntl |= CNTL_ST_1XBPP_565;
- } else if (fb->panel->caps & CLCD_CAP_5551) {
- msb = maxoff + 5 - 1;
- fb->panel->cntl |= CNTL_ST_1XBPP_5551;
- } else if (fb->panel->caps & CLCD_CAP_444) {
- msb = maxoff + 4 - 1;
- fb->panel->cntl |= CNTL_ST_1XBPP_444;
- }
-
- /* Send out as many bits as we need */
- if (msb > 17)
- fb->panel->cntl |= CNTL_ST_CDWID_24;
- else if (msb > 15)
- fb->panel->cntl |= CNTL_ST_CDWID_18;
- else if (msb > 11)
- fb->panel->cntl |= CNTL_ST_CDWID_16;
- else
- fb->panel->cntl |= CNTL_ST_CDWID_12;
- }
-
return fb->panel->caps ? 0 : -EINVAL;
}
@@ -775,12 +715,6 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (!panel)
return -ENODEV;
- if (fb->vendor->init_panel) {
- err = fb->vendor->init_panel(fb, panel);
- if (err)
- return err;
- }
-
err = clcdfb_of_get_backlight(panel, fb->panel);
if (err)
return err;
@@ -941,7 +875,6 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev_get_platdata(&dev->dev);
- struct clcd_vendor_data *vendor = id->data;
struct clcd_fb *fb;
int ret;
@@ -951,12 +884,6 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
if (!board)
return -EINVAL;
- if (vendor->init_board) {
- ret = vendor->init_board(dev, board);
- if (ret)
- return ret;
- }
-
ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (ret)
goto out;
@@ -974,7 +901,6 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
}
fb->dev = dev;
- fb->vendor = vendor;
fb->board = board;
dev_info(&fb->dev->dev, "PL%03x designer %02x rev%u at 0x%08llx\n",
@@ -1021,30 +947,10 @@ static int clcdfb_remove(struct amba_device *dev)
return 0;
}
-static struct clcd_vendor_data vendor_arm = {
- /* Sets up the versatile board displays */
- .init_panel = versatile_clcd_init_panel,
-};
-
-static struct clcd_vendor_data vendor_nomadik = {
- .clock_timregs = true,
- .packed_24_bit_pixels = true,
- .st_bitmux_control = true,
- .init_board = nomadik_clcd_init_board,
- .init_panel = nomadik_clcd_init_panel,
-};
-
static const struct amba_id clcdfb_id_table[] = {
{
.id = 0x00041110,
.mask = 0x000ffffe,
- .data = &vendor_arm,
- },
- /* ST Electronics Nomadik variant */
- {
- .id = 0x00180110,
- .mask = 0x00fffffe,
- .data = &vendor_nomadik,
},
{ 0, 0 },
};
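
The least obvious part of the removed Nomadik support is the ST bit-mux handling, which derives the width of the display output bus from the highest colour-field offset. A stand-alone sketch of that calculation, for illustration only: max3() comes from linux/kernel.h, and the returned width corresponds to the CNTL_ST_CDWID_* choices in the removed code.

#include <linux/kernel.h>

static int clcd_st_output_width(int r0, int g0, int b0, int bits_per_chan)
{
	/* most significant bit actually driven on the output bus */
	int msb = max3(r0, g0, b0) + bits_per_chan - 1;

	if (msb > 17)
		return 24;
	if (msb > 15)
		return 18;
	if (msb > 11)
		return 16;
	return 12;
}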
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index fcd2dd670a65..b986af2a8042 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -47,7 +47,6 @@
#define ATAFB_EXT
#define ATAFB_FALCON
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -55,6 +54,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
@@ -3073,28 +3073,22 @@ int __init atafb_setup(char *options)
return 0;
}
-int __init atafb_init(void)
+static int __init atafb_probe(struct platform_device *pdev)
{
int pad, detected_mode, error;
unsigned int defmode = 0;
unsigned long mem_req;
-
-#ifndef MODULE
char *option = NULL;
if (fb_get_options("atafb", &option))
return -ENODEV;
atafb_setup(option);
-#endif
- printk("atafb_init: start\n");
-
- if (!MACH_IS_ATARI)
- return -ENODEV;
+ dev_dbg(&pdev->dev, "%s: start\n", __func__);
do {
#ifdef ATAFB_EXT
if (external_addr) {
- printk("atafb_init: initializing external hw\n");
+ dev_dbg(&pdev->dev, "initializing external hw\n");
fbhw = &ext_switch;
atafb_ops.fb_setcolreg = &ext_setcolreg;
defmode = DEFMODE_EXT;
@@ -3103,7 +3097,7 @@ int __init atafb_init(void)
#endif
#ifdef ATAFB_TT
if (ATARIHW_PRESENT(TT_SHIFTER)) {
- printk("atafb_init: initializing TT hw\n");
+ dev_dbg(&pdev->dev, "initializing TT hw\n");
fbhw = &tt_switch;
atafb_ops.fb_setcolreg = &tt_setcolreg;
defmode = DEFMODE_TT;
@@ -3112,7 +3106,7 @@ int __init atafb_init(void)
#endif
#ifdef ATAFB_FALCON
if (ATARIHW_PRESENT(VIDEL_SHIFTER)) {
- printk("atafb_init: initializing Falcon hw\n");
+ dev_dbg(&pdev->dev, "initializing Falcon hw\n");
fbhw = &falcon_switch;
atafb_ops.fb_setcolreg = &falcon_setcolreg;
error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0,
@@ -3127,7 +3121,7 @@ int __init atafb_init(void)
#ifdef ATAFB_STE
if (ATARIHW_PRESENT(STND_SHIFTER) ||
ATARIHW_PRESENT(EXTD_SHIFTER)) {
- printk("atafb_init: initializing ST/E hw\n");
+ dev_dbg(&pdev->dev, "initializing ST/E hw\n");
fbhw = &st_switch;
atafb_ops.fb_setcolreg = &stste_setcolreg;
defmode = DEFMODE_STE;
@@ -3135,7 +3129,8 @@ int __init atafb_init(void)
}
fbhw = &st_switch;
atafb_ops.fb_setcolreg = &stste_setcolreg;
- printk("Cannot determine video hardware; defaulting to ST(e)\n");
+ dev_warn(&pdev->dev,
+ "Cannot determine video hardware; defaulting to ST(e)\n");
#else /* ATAFB_STE */
/* no default driver included */
/* Nobody will ever see this message :-) */
@@ -3175,8 +3170,8 @@ int __init atafb_init(void)
kernel_set_cachemode(screen_base, screen_len,
IOMAP_WRITETHROUGH);
}
- printk("atafb: screen_base %p phys_screen_base %lx screen_len %d\n",
- screen_base, phys_screen_base, screen_len);
+ dev_info(&pdev->dev, "phys_screen_base %lx screen_len %d\n",
+ phys_screen_base, screen_len);
#ifdef ATAFB_EXT
} else {
/* Map the video memory (physical address given) to somewhere
@@ -3223,12 +3218,12 @@ int __init atafb_init(void)
fb_alloc_cmap(&(fb_info.cmap), 1 << fb_info.var.bits_per_pixel, 0);
- printk("Determined %dx%d, depth %d\n",
- fb_info.var.xres, fb_info.var.yres, fb_info.var.bits_per_pixel);
+ dev_info(&pdev->dev, "Determined %dx%d, depth %d\n", fb_info.var.xres,
+ fb_info.var.yres, fb_info.var.bits_per_pixel);
if ((fb_info.var.xres != fb_info.var.xres_virtual) ||
(fb_info.var.yres != fb_info.var.yres_virtual))
- printk(" virtual %dx%d\n", fb_info.var.xres_virtual,
- fb_info.var.yres_virtual);
+ dev_info(&pdev->dev, " virtual %dx%d\n",
+ fb_info.var.xres_virtual, fb_info.var.yres_virtual);
if (register_framebuffer(&fb_info) < 0) {
#ifdef ATAFB_EXT
@@ -3251,14 +3246,32 @@ int __init atafb_init(void)
return 0;
}
-module_init(atafb_init);
+static void atafb_shutdown(struct platform_device *pdev)
+{
+ /* Unblank before kexec */
+ if (fbhw->blank)
+ fbhw->blank(0);
+}
-#ifdef MODULE
-MODULE_LICENSE("GPL");
+static struct platform_driver atafb_driver = {
+ .shutdown = atafb_shutdown,
+ .driver = {
+ .name = "atafb",
+ },
+};
-int cleanup_module(void)
+static int __init atafb_init(void)
{
- unregister_framebuffer(&fb_info);
- return atafb_deinit();
+ struct platform_device *pdev;
+
+ if (!MACH_IS_ATARI)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("atafb", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return platform_driver_probe(&atafb_driver, atafb_probe);
}
-#endif /* MODULE */
+
+device_initcall(atafb_init);
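
The conversion above uses a common pattern for legacy, machine-detected hardware: register one platform device from an initcall and bind it with platform_driver_probe(), so the probe path can stay __init and be discarded after boot. A minimal sketch of the pattern with placeholder names ("examplefb"):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init examplefb_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static struct platform_driver examplefb_driver = {
	.driver = {
		.name = "examplefb",
	},
};

static int __init examplefb_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("examplefb", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return platform_driver_probe(&examplefb_driver, examplefb_probe);
}
device_initcall(examplefb_init);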
diff --git a/drivers/video/fbdev/atafb_iplan2p2.c b/drivers/video/fbdev/atafb_iplan2p2.c
index 8cc9c50379d0..a1660c24bf36 100644
--- a/drivers/video/fbdev/atafb_iplan2p2.c
+++ b/drivers/video/fbdev/atafb_iplan2p2.c
@@ -10,7 +10,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -269,25 +268,3 @@ void atafb_iplan2p2_linefill(struct fb_info *info, u_long next_line,
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_iplan2p2_copyarea);
-EXPORT_SYMBOL(atafb_iplan2p2_fillrect);
-EXPORT_SYMBOL(atafb_iplan2p2_linefill);
diff --git a/drivers/video/fbdev/atafb_iplan2p4.c b/drivers/video/fbdev/atafb_iplan2p4.c
index bee0d89463f7..663d66582d79 100644
--- a/drivers/video/fbdev/atafb_iplan2p4.c
+++ b/drivers/video/fbdev/atafb_iplan2p4.c
@@ -10,7 +10,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -284,25 +283,3 @@ void atafb_iplan2p4_linefill(struct fb_info *info, u_long next_line,
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_iplan2p4_copyarea);
-EXPORT_SYMBOL(atafb_iplan2p4_fillrect);
-EXPORT_SYMBOL(atafb_iplan2p4_linefill);
diff --git a/drivers/video/fbdev/atafb_iplan2p8.c b/drivers/video/fbdev/atafb_iplan2p8.c
index 356fb52ce443..39a6cbbb6ca3 100644
--- a/drivers/video/fbdev/atafb_iplan2p8.c
+++ b/drivers/video/fbdev/atafb_iplan2p8.c
@@ -10,7 +10,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -321,25 +320,3 @@ void atafb_iplan2p8_linefill(struct fb_info *info, u_long next_line,
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_iplan2p8_copyarea);
-EXPORT_SYMBOL(atafb_iplan2p8_fillrect);
-EXPORT_SYMBOL(atafb_iplan2p8_linefill);
diff --git a/drivers/video/fbdev/atafb_mfb.c b/drivers/video/fbdev/atafb_mfb.c
index 6a352d62eecf..384fd3e4d3e1 100644
--- a/drivers/video/fbdev/atafb_mfb.c
+++ b/drivers/video/fbdev/atafb_mfb.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
@@ -88,25 +87,3 @@ void atafb_mfb_linefill(struct fb_info *info, u_long next_line,
*dest++ = *data++;
}
}
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif /* MODULE */
-
-
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(atafb_mfb_copyarea);
-EXPORT_SYMBOL(atafb_mfb_fillrect);
-EXPORT_SYMBOL(atafb_mfb_linefill);
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 4ed55e6bbb84..e67dfd94bf1d 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1,5 +1,5 @@
/*
- * Driver for AT91/AT32 LCD Controller
+ * Driver for AT91 LCD Controller
*
* Copyright (C) 2007 Atmel Corporation
*
@@ -99,86 +99,6 @@ static struct atmel_lcdfb_config at91sam9rl_config = {
.have_intensity_bit = true,
};
-static struct atmel_lcdfb_config at32ap_config = {
- .have_hozval = true,
-};
-
-static const struct platform_device_id atmel_lcdfb_devtypes[] = {
- {
- .name = "at91sam9261-lcdfb",
- .driver_data = (unsigned long)&at91sam9261_config,
- }, {
- .name = "at91sam9263-lcdfb",
- .driver_data = (unsigned long)&at91sam9263_config,
- }, {
- .name = "at91sam9g10-lcdfb",
- .driver_data = (unsigned long)&at91sam9g10_config,
- }, {
- .name = "at91sam9g45-lcdfb",
- .driver_data = (unsigned long)&at91sam9g45_config,
- }, {
- .name = "at91sam9g45es-lcdfb",
- .driver_data = (unsigned long)&at91sam9g45es_config,
- }, {
- .name = "at91sam9rl-lcdfb",
- .driver_data = (unsigned long)&at91sam9rl_config,
- }, {
- .name = "at32ap-lcdfb",
- .driver_data = (unsigned long)&at32ap_config,
- }, {
- /* terminator */
- }
-};
-MODULE_DEVICE_TABLE(platform, atmel_lcdfb_devtypes);
-
-static struct atmel_lcdfb_config *
-atmel_lcdfb_get_config(struct platform_device *pdev)
-{
- unsigned long data;
-
- data = platform_get_device_id(pdev)->driver_data;
-
- return (struct atmel_lcdfb_config *)data;
-}
-
-#if defined(CONFIG_ARCH_AT91)
-#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
- | FBINFO_PARTIAL_PAN_OK \
- | FBINFO_HWACCEL_YPAN)
-
-static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
- struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
-
-}
-#elif defined(CONFIG_AVR32)
-#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
- | FBINFO_PARTIAL_PAN_OK \
- | FBINFO_HWACCEL_XPAN \
- | FBINFO_HWACCEL_YPAN)
-
-static void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
- struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- u32 dma2dcfg;
- u32 pixeloff;
-
- pixeloff = (var->xoffset * info->var.bits_per_pixel) & 0x1f;
-
- dma2dcfg = (info->var.xres_virtual - info->var.xres)
- * info->var.bits_per_pixel / 8;
- dma2dcfg |= pixeloff << ATMEL_LCDC_PIXELOFF_OFFSET;
- lcdc_writel(sinfo, ATMEL_LCDC_DMA2DCFG, dma2dcfg);
-
- /* Update configuration */
- lcdc_writel(sinfo, ATMEL_LCDC_DMACON,
- lcdc_readl(sinfo, ATMEL_LCDC_DMACON)
- | ATMEL_LCDC_DMAUPDT);
-}
-#endif
-
static u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
| ATMEL_LCDC_POL_POSITIVE
| ATMEL_LCDC_ENA_PWMENABLE;
@@ -404,8 +324,6 @@ static void atmel_lcdfb_update_dma(struct fb_info *info,
/* Set framebuffer DMA base address and pixel offset */
lcdc_writel(sinfo, ATMEL_LCDC_DMABADDR1, dma_addr);
-
- atmel_lcdfb_update_dma2d(sinfo, var, info);
}
static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
@@ -978,7 +896,6 @@ static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
clk_disable_unprepare(sinfo->lcdc_clk);
}
-#ifdef CONFIG_OF
static const struct of_device_id atmel_lcdfb_dt_ids[] = {
{ .compatible = "atmel,at91sam9261-lcdc" , .data = &at91sam9261_config, },
{ .compatible = "atmel,at91sam9263-lcdc" , .data = &at91sam9263_config, },
@@ -986,7 +903,6 @@ static const struct of_device_id atmel_lcdfb_dt_ids[] = {
{ .compatible = "atmel,at91sam9g45-lcdc" , .data = &at91sam9g45_config, },
{ .compatible = "atmel,at91sam9g45es-lcdc" , .data = &at91sam9g45es_config, },
{ .compatible = "atmel,at91sam9rl-lcdc" , .data = &at91sam9rl_config, },
- { .compatible = "atmel,at32ap-lcdc" , .data = &at32ap_config, },
{ /* sentinel */ }
};
@@ -1122,19 +1038,12 @@ put_display_node:
of_node_put(display_np);
return ret;
}
-#else
-static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
-{
- return 0;
-}
-#endif
static int __init atmel_lcdfb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fb_info *info;
struct atmel_lcdfb_info *sinfo;
- struct atmel_lcdfb_pdata *pdata = NULL;
struct resource *regs = NULL;
struct resource *map = NULL;
struct fb_modelist *modelist;
@@ -1159,21 +1068,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
ret = atmel_lcdfb_of_init(sinfo);
if (ret)
goto free_info;
- } else if (dev_get_platdata(dev)) {
- struct fb_monspecs *monspecs;
- int i;
-
- pdata = dev_get_platdata(dev);
- monspecs = pdata->default_monspecs;
- sinfo->pdata = *pdata;
-
- for (i = 0; i < monspecs->modedb_len; i++)
- fb_add_videomode(&monspecs->modedb[i], &info->modelist);
-
- sinfo->config = atmel_lcdfb_get_config(pdev);
-
- info->var.bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
- memcpy(&info->monspecs, pdata->default_monspecs, sizeof(info->monspecs));
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
@@ -1186,7 +1080,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
if (IS_ERR(sinfo->reg_lcd))
sinfo->reg_lcd = NULL;
- info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_HWACCEL_YPAN;
info->pseudo_palette = sinfo->pseudo_palette;
info->fbops = &atmel_lcdfb_ops;
@@ -1357,12 +1252,10 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fb_info *info = dev_get_drvdata(dev);
struct atmel_lcdfb_info *sinfo;
- struct atmel_lcdfb_pdata *pdata;
if (!info || !info->par)
return 0;
sinfo = info->par;
- pdata = &sinfo->pdata;
cancel_work_sync(&sinfo->task);
exit_backlight(sinfo);
@@ -1435,7 +1328,6 @@ static struct platform_driver atmel_lcdfb_driver = {
.remove = __exit_p(atmel_lcdfb_remove),
.suspend = atmel_lcdfb_suspend,
.resume = atmel_lcdfb_resume,
- .id_table = atmel_lcdfb_devtypes,
.driver = {
.name = "atmel_lcdfb",
.of_match_table = of_match_ptr(atmel_lcdfb_dt_ids),
@@ -1444,6 +1336,6 @@ static struct platform_driver atmel_lcdfb_driver = {
module_platform_driver_probe(atmel_lcdfb_driver, atmel_lcdfb_probe);
-MODULE_DESCRIPTION("AT91/AT32 LCD Controller framebuffer driver");
+MODULE_DESCRIPTION("AT91 LCD Controller framebuffer driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 68a113594808..2811c4afde01 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
int size = len * sizeof(u16);
int ret = -ENOMEM;
+ flags |= __GFP_NOWARN;
+
if (cmap->len != len) {
fb_dealloc_cmap(cmap);
if (!len)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index cd059a801662..786f9aab55df 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1069,7 +1069,7 @@ static void fbcon_init(struct vc_data *vc, int init)
cap = info->flags;
- if (console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
+ if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
logo_shown = FBCON_LOGO_DONTSHOW;
if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 4721491e6c8c..d1949c92be98 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1882,14 +1882,35 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
{
struct apertures_struct *ap;
bool primary = false;
- int err;
+ int err, idx, bar;
+ bool res_id_found = false;
+
+ for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+ idx++;
+ }
- ap = alloc_apertures(1);
+ ap = alloc_apertures(idx);
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = pci_resource_start(pdev, res_id);
- ap->ranges[0].size = pci_resource_len(pdev, res_id);
+ for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+ ap->ranges[idx].base = pci_resource_start(pdev, bar);
+ ap->ranges[idx].size = pci_resource_len(pdev, bar);
+ pci_info(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
+ (unsigned long)pci_resource_start(pdev, bar),
+ (unsigned long)pci_resource_end(pdev, bar));
+ idx++;
+ if (res_id == bar)
+ res_id_found = true;
+ }
+ if (!res_id_found)
+ pci_warn(pdev, "%s: passed res_id (%d) is not a memory bar\n",
+ __func__, res_id);
+
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW;
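
With this change remove_conflicting_pci_framebuffers() builds the aperture list from every memory BAR of the device, not just res_id, and only warns when res_id itself is not a memory BAR. A sketch of a typical caller, assuming the signature shown in the hunk header above; the driver name and BAR index are placeholders:

#include <linux/fb.h>
#include <linux/pci.h>

static int exampledrm_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int ret;

	/* kick out a generic firmware framebuffer (vesafb/efifb) first */
	ret = remove_conflicting_pci_framebuffers(pdev, 0, "exampledrmfb");
	if (ret)
		return ret;

	return pci_enable_device(pdev);
}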
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 283d9307df21..ac049871704d 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
if (var->vmode & FB_VMODE_DOUBLE)
vtotal *= 2;
+ if (!htotal || !vtotal)
+ return;
+
hfreq = pixclock/htotal;
mode->refresh = hfreq/vtotal;
}
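
The added check guards the two divisions that follow, which would otherwise fault on an incomplete var with zero htotal or vtotal. The same calculation as a stand-alone sketch, assuming pixclock has already been converted to Hz as the surrounding function does:

static u32 example_mode_refresh(u32 pixclock_hz, u32 htotal, u32 vtotal)
{
	u32 hfreq;

	if (!htotal || !vtotal)
		return 0;	/* incomplete mode, refresh is undefined */

	hfreq = pixclock_hz / htotal;	/* horizontal scan frequency */
	return hfreq / vtotal;		/* vertical refresh rate */
}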
diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
index d9e816d53531..1bddcc20b2c0 100644
--- a/drivers/video/fbdev/fb-puv3.c
+++ b/drivers/video/fbdev/fb-puv3.c
@@ -20,7 +20,7 @@
#include <linux/console.h>
#include <linux/mm.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/pgtable.h>
#include <mach/hardware.h>
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 463028543173..59e1cae57948 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -285,6 +285,8 @@ static int hga_card_detect(void)
hga_vram_len = 0x08000;
hga_vram = ioremap(0xb0000, hga_vram_len);
+ if (!hga_vram)
+ goto error;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 4b9615e4ce74..35bba3c2036d 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1515,6 +1515,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
+ if (!info->screen_base) {
+ release_mem_region(addr, size);
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
par->cmap_regs_phys = addr + 0x840000;
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index e707e617bf1c..8820a556014c 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -120,10 +120,7 @@ struct jet_cmap_regs {
#define PIXEL_TO_MM(a) (((a)*10)/28) /* width in mm at 72 dpi */
static struct fb_var_screeninfo macfb_defined = {
- .bits_per_pixel = 8,
.activate = FB_ACTIVATE_NOW,
- .width = -1,
- .height = -1,
.right_margin = 32,
.upper_margin = 16,
.lower_margin = 4,
@@ -139,7 +136,6 @@ static struct fb_fix_screeninfo macfb_fix = {
static void *slot_addr;
static struct fb_info fb_info;
static u32 pseudo_palette[16];
-static int inverse;
static int vidtest;
/*
@@ -152,7 +148,7 @@ static int dafb_setpalette(unsigned int regno, unsigned int red,
unsigned int green, unsigned int blue,
struct fb_info *info)
{
- static int lastreg = -1;
+ static int lastreg = -2;
unsigned long flags;
local_irq_save(flags);
@@ -201,9 +197,6 @@ static int v8_brazil_setpalette(unsigned int regno, unsigned int red,
unsigned int bpp = info->var.bits_per_pixel;
unsigned long flags;
- if (bpp > 8)
- return 1; /* failsafe */
-
local_irq_save(flags);
/* On these chips, the CLUT register numbers are spread out
@@ -234,9 +227,6 @@ static int rbv_setpalette(unsigned int regno, unsigned int red,
{
unsigned long flags;
- if (info->var.bits_per_pixel > 8)
- return 1; /* failsafe */
-
local_irq_save(flags);
/* From the VideoToolbox driver. Seems to be saying that
@@ -353,9 +343,6 @@ static int civic_setpalette(unsigned int regno, unsigned int red,
unsigned long flags;
int clut_status;
- if (info->var.bits_per_pixel > 8)
- return 1; /* failsafe */
-
local_irq_save(flags);
/* Set the register address */
@@ -532,7 +519,7 @@ static void __init macfb_setup(char *options)
continue;
if (!strcmp(this_opt, "inverse"))
- inverse = 1;
+ fb_invert_cmaps();
else
if (!strcmp(this_opt, "vidtest"))
vidtest = 1; /* enable experimental CLUT code */
@@ -688,17 +675,14 @@ static int __init macfb_init(void)
case NUBUS_DRHW_APPLE_MDC:
strcpy(macfb_fix.id, "Mac Disp. Card");
macfb_setpalette = mdc_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
case NUBUS_DRHW_APPLE_TFB:
strcpy(macfb_fix.id, "Toby");
macfb_setpalette = toby_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
case NUBUS_DRHW_APPLE_JET:
strcpy(macfb_fix.id, "Jet");
macfb_setpalette = jet_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
default:
strcpy(macfb_fix.id, "Generic NuBus");
@@ -731,7 +715,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "DAFB");
macfb_setpalette = dafb_setpalette;
dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -741,7 +724,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "V8");
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -755,7 +737,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "Brazil");
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -772,7 +753,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "Sonora");
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -785,7 +765,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "RBV");
macfb_setpalette = rbv_setpalette;
rbv_cmap_regs = ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -796,7 +775,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "Civic");
macfb_setpalette = civic_setpalette;
civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
@@ -810,7 +788,6 @@ static int __init macfb_init(void)
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs =
ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
}
break;
@@ -823,7 +800,6 @@ static int __init macfb_init(void)
macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs =
ioremap(DAC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
}
break;
@@ -892,7 +868,6 @@ static int __init macfb_init(void)
strcpy(macfb_fix.id, "CSC");
macfb_setpalette = csc_setpalette;
csc_cmap_regs = ioremap(CSC_BASE, 0x1000);
- macfb_defined.activate = FB_ACTIVATE_NOW;
break;
default:
diff --git a/drivers/video/fbdev/mmp/Kconfig b/drivers/video/fbdev/mmp/Kconfig
index f56a7e2e8136..1b5e80c8a984 100644
--- a/drivers/video/fbdev/mmp/Kconfig
+++ b/drivers/video/fbdev/mmp/Kconfig
@@ -1,7 +1,7 @@
menuconfig MMP_DISP
- tristate "Marvell MMP Display Subsystem support"
- depends on CPU_PXA910 || CPU_MMP2
- help
+ tristate "Marvell MMP Display Subsystem support"
+ depends on CPU_PXA910 || CPU_MMP2
+ help
Marvell Display Subsystem support.
if MMP_DISP
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 12c8bd1d24d5..1fdd1eb38fe0 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -181,6 +181,7 @@ struct mxsfb_info {
const struct mxsfb_devdata *devdata;
u32 sync;
struct regulator *reg_lcd;
+ int pre_init;
};
#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -419,6 +420,12 @@ static int mxsfb_set_par(struct fb_info *fb_info)
fb_info->fix.line_length = line_size;
+ if (host->pre_init) {
+ mxsfb_enable_controller(fb_info);
+ host->pre_init = 0;
+ return 0;
+ }
+
/*
* It seems you can't re-program the controller while it is still running;
* this may lead to shifted pictures (FIFO issue?).
@@ -623,7 +630,6 @@ static int mxsfb_restore_mode(struct fb_info *fb_info,
struct fb_videomode *vmode)
{
struct mxsfb_info *host = fb_info->par;
- unsigned line_count;
unsigned period;
unsigned long pa, fbsize;
int bits_per_pixel, ofs, ret = 0;
@@ -710,7 +716,6 @@ static int mxsfb_restore_mode(struct fb_info *fb_info,
writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
}
- line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
fb_info->fix.ypanstep = 1;
clk_prepare_enable(host->clk);
@@ -931,6 +936,10 @@ static int mxsfb_probe(struct platform_device *pdev)
if (IS_ERR(host->reg_lcd))
host->reg_lcd = NULL;
+#if defined(CONFIG_FB_PRE_INIT_FB)
+ host->pre_init = 1;
+#endif
+
fb_info->pseudo_palette = devm_kcalloc(&pdev->dev, 16, sizeof(u32),
GFP_KERNEL);
if (!fb_info->pseudo_palette) {
@@ -963,6 +972,7 @@ static int mxsfb_probe(struct platform_device *pdev)
mxsfb_enable_controller(fb_info);
}
+ host->pre_init = 0;
dev_info(&pdev->dev, "initialized\n");
return 0;
diff --git a/drivers/video/fbdev/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c
index 6680edae4696..44ea5380a546 100644
--- a/drivers/video/fbdev/nuc900fb.c
+++ b/drivers/video/fbdev/nuc900fb.c
@@ -455,7 +455,7 @@ static int nuc900fb_cpufreq_transition(struct notifier_block *nb,
struct fb_info *fbinfo;
long delta_f;
info = container_of(nb, struct nuc900fb_info, freq_transition);
- fbinfo = platform_get_drvdata(to_platform_device(info->dev));
+ fbinfo = dev_get_drvdata(info->dev);
delta_f = info->clk_rate - clk_get_rate(info->clk);
diff --git a/drivers/video/fbdev/omap/Kconfig b/drivers/video/fbdev/omap/Kconfig
index 29d250da8a3e..ca147936bb5c 100644
--- a/drivers/video/fbdev/omap/Kconfig
+++ b/drivers/video/fbdev/omap/Kconfig
@@ -6,7 +6,7 @@ config FB_OMAP
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
- Frame buffer driver for OMAP based boards.
+ Frame buffer driver for OMAP based boards.
config FB_OMAP_LCDC_EXTERNAL
bool "External LCD controller support"
@@ -49,13 +49,11 @@ config FB_OMAP_LCD_H3
H3 board.
config FB_OMAP_DMA_TUNE
- bool "Set DMA SDRAM access priority high"
- depends on FB_OMAP
- help
- On systems in which video memory is in system memory
- (SDRAM) this will speed up graphics DMA operations.
- If you have such a system and want to use rotation
- answer yes. Answer no if you have a dedicated video
- memory, or don't use any of the accelerated features.
-
-
+ bool "Set DMA SDRAM access priority high"
+ depends on FB_OMAP
+ help
+ On systems in which video memory is in system memory
+ (SDRAM) this will speed up graphics DMA operations.
+ If you have such a system and want to use rotation
+ answer yes. Answer no if you have a dedicated video
+ memory, or don't use any of the accelerated features.
diff --git a/drivers/video/fbdev/omap2/omapfb/Kconfig b/drivers/video/fbdev/omap2/omapfb/Kconfig
index 3bf154e676d1..0410e07bb29e 100644
--- a/drivers/video/fbdev/omap2/omapfb/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/Kconfig
@@ -2,23 +2,23 @@ config OMAP2_VRFB
bool
menuconfig FB_OMAP2
- tristate "OMAP2+ frame buffer support"
- depends on FB
- depends on DRM_OMAP = n
+ tristate "OMAP2+ frame buffer support"
+ depends on FB
+ depends on DRM_OMAP = n
depends on GPIOLIB
- select FB_OMAP2_DSS
+ select FB_OMAP2_DSS
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
Frame buffer driver for OMAP2+ based boards.
if FB_OMAP2
config FB_OMAP2_DEBUG_SUPPORT
- bool "Debug support for OMAP2+ FB"
+ bool "Debug support for OMAP2+ FB"
default y
depends on FB_OMAP2
help
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 08f12039dd02..3df8736cf8d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -1,5 +1,5 @@
menu "OMAPFB Panel and Encoder Drivers"
- depends on FB_OMAP2_DSS
+ depends on FB_OMAP2_DSS
config FB_OMAP2_ENCODER_OPA362
tristate "OPA362 external analog amplifier"
@@ -8,29 +8,29 @@ config FB_OMAP2_ENCODER_OPA362
through a GPIO.
config FB_OMAP2_ENCODER_TFP410
- tristate "TFP410 DPI to DVI Encoder"
+ tristate "TFP410 DPI to DVI Encoder"
help
Driver for TFP410 DPI to DVI encoder.
config FB_OMAP2_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
+ tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
config FB_OMAP2_CONNECTOR_DVI
- tristate "DVI Connector"
+ tristate "DVI Connector"
depends on I2C
help
Driver for a generic DVI connector.
config FB_OMAP2_CONNECTOR_HDMI
- tristate "HDMI Connector"
+ tristate "HDMI Connector"
help
Driver for a generic HDMI connector.
config FB_OMAP2_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
+ tristate "Analog TV Connector"
help
Driver for a generic analog TV connector.
@@ -58,29 +58,29 @@ config FB_OMAP2_PANEL_LGPHILIPS_LB035Q02
LCD Panel used on the Gumstix Overo Palo35
config FB_OMAP2_PANEL_SHARP_LS037V7DW01
- tristate "Sharp LS037V7DW01 LCD Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- LCD Panel used in TI's SDP3430 and EVM boards
+ tristate "Sharp LS037V7DW01 LCD Panel"
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ LCD Panel used in TI's SDP3430 and EVM boards
config FB_OMAP2_PANEL_TPO_TD028TTEC1
- tristate "TPO TD028TTEC1 LCD Panel"
- depends on SPI
- help
- LCD panel used in Openmoko.
+ tristate "TPO TD028TTEC1 LCD Panel"
+ depends on SPI
+ help
+ LCD panel used in Openmoko.
config FB_OMAP2_PANEL_TPO_TD043MTEA1
- tristate "TPO TD043MTEA1 LCD Panel"
- depends on SPI
- help
- LCD Panel used in OMAP3 Pandora
+ tristate "TPO TD043MTEA1 LCD Panel"
+ depends on SPI
+ help
+ LCD Panel used in OMAP3 Pandora
config FB_OMAP2_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 Panel"
depends on SPI
depends on BACKLIGHT_CLASS_DEVICE
help
- This NEC NL8048HL11 panel is TFT LCD used in the
- Zoom2/3/3630 sdp boards.
+ This NEC NL8048HL11 panel is TFT LCD used in the
+ Zoom2/3/3630 sdp boards.
endmenu
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
index 356b89b378d4..a34820e8ab97 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
@@ -3,7 +3,7 @@ config FB_OMAP2_DSS_INIT
bool
config FB_OMAP2_DSS
- tristate
+ tristate
select VIDEOMODE_HELPERS
select FB_OMAP2_DSS_INIT
select HDMI
@@ -53,7 +53,7 @@ config FB_OMAP2_DSS_RFBI
config FB_OMAP2_DSS_VENC
bool "VENC support"
- default y
+ default y
help
OMAP Video Encoder support for S-Video and composite TV-out.
@@ -62,7 +62,7 @@ config FB_OMAP2_DSS_HDMI_COMMON
config FB_OMAP4_DSS_HDMI
bool "HDMI support for OMAP4"
- default y
+ default y
select FB_OMAP2_DSS_HDMI_COMMON
help
HDMI support for OMAP4 based SoCs.
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
index 136d30484d02..5da7ed6d653e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
@@ -111,6 +111,8 @@ static void __init omapdss_omapify_node(struct device_node *node)
new_len = prop->length + strlen(prefix) * num_strs;
new_compat = kmalloc(new_len, GFP_KERNEL);
+ if (!new_compat)
+ return;
omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length);
@@ -193,8 +195,10 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
- if (dss == NULL || !of_device_is_available(dss))
+ if (dss == NULL || !of_device_is_available(dss)) {
+ of_node_put(dss);
return 0;
+ }
omapdss_walk_device(dss, true);
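
The fix above follows the usual device-tree reference-counting rule: of_find_matching_node() returns a node with its refcount raised, so the "not available" early return has to drop it as well. A minimal sketch of the rule with a placeholder match table; of_node_put(NULL) is a no-op, which keeps the error path simple:

#include <linux/errno.h>
#include <linux/of.h>

static int example_find_node(const struct of_device_id *matches)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, matches);
	if (!np || !of_device_is_available(np)) {
		of_node_put(np);	/* safe even when np is NULL */
		return -ENODEV;
	}

	/* ... use np ... */

	of_node_put(np);
	return 0;
}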
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 8a53d1de611d..4e4d6a0df978 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,7 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
+ ret = get_user_pages_fast((unsigned long)buf, nr_pages, FOLL_WRITE, pages);
if (ret < nr_pages) {
nr_pages = ret;
ret = -EINVAL;
@@ -1071,7 +1071,6 @@ static struct pvr2_board {
static int __init pvr2fb_init(void)
{
int i, ret = -ENODEV;
- int size;
#ifndef MODULE
char *option = NULL;
@@ -1080,7 +1079,6 @@ static int __init pvr2fb_init(void)
return -ENODEV;
pvr2fb_setup(option);
#endif
- size = sizeof(struct fb_info) + sizeof(struct pvr2fb_par) + 16 * sizeof(u32);
fb_info = framebuffer_alloc(sizeof(struct pvr2fb_par), NULL);
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index a67e4567e656..a702da89910b 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -777,7 +777,7 @@ static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
long delta_f;
info = container_of(nb, struct s3c2410fb_info, freq_transition);
- fbinfo = platform_get_drvdata(to_platform_device(info->dev));
+ fbinfo = dev_get_drvdata(info->dev);
/* work out change, <0 for speed-up */
delta_f = info->clk_rate - clk_get_rate(info->clk);
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index c09d7426cd92..47b78f0138c3 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -2155,9 +2155,9 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev,
err = fb_alloc_cmap(&info->cmap, NR_PALETTE, 0);
if (!err)
- info->flags |= FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_IMAGEBLIT;
+ info->flags |= FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
}
#endif
return err;
diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
index aad1cc4be34a..c7ebf03b8d53 100644
--- a/drivers/video/fbdev/sm712.h
+++ b/drivers/video/fbdev/sm712.h
@@ -15,14 +15,10 @@
#define FB_ACCEL_SMI_LYNX 88
-#define SCREEN_X_RES 1024
-#define SCREEN_Y_RES 600
-#define SCREEN_BPP 16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE 0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE 0x00800000
+#define SCREEN_X_RES 1024
+#define SCREEN_Y_RES_PC 768
+#define SCREEN_Y_RES_NETBOOK 600
+#define SCREEN_BPP 16
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 502d0de2feec..f1dcc6766d1e 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
},
},
+ { /* 1024 x 768 16Bpp 60Hz */
+ 1024, 768, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
{ /* mode#5: 1024 x 768 24Bpp 60Hz */
1024, 768, 24, 60,
/* Init_MISC */
@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
static int smtc_blank(int blank_mode, struct fb_info *info)
{
+ struct smtcfb_info *sfb = info->par;
+
/* clear DPMS setting */
switch (blank_mode) {
case FB_BLANK_UNBLANK:
/* Screen On: HSync: On, VSync : On */
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ case 0x720:
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ }
+
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
break;
case FB_BLANK_NORMAL:
/* Screen Off: HSync: On, VSync : On Soft blank */
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
smtc_seqw(0x6a, 0x16);
smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
break;
case FB_BLANK_VSYNC_SUSPEND:
/* Screen On: HSync: On, VSync : Off */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
case FB_BLANK_HSYNC_SUSPEND:
/* Screen On: HSync: Off, VSync : On */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
case FB_BLANK_POWERDOWN:
/* Screen On: HSync: Off, VSync : Off */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
default:
return -EINVAL;
@@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
/* init SEQ register SR30 - SR75 */
for (i = 0; i < SIZE_SR30_SR75; i++)
- if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
- (i + 0x30) != 0x6b)
+ if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
+ (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
+ (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
+ (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
smtc_seqw(i + 0x30,
vgamode[j].init_sr30_sr75[i]);
@@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
/* init CRTC register CR30 - CR4D */
- for (i = 0; i < SIZE_CR30_CR4D; i++)
+ for (i = 0; i < SIZE_CR30_CR4D; i++) {
+ if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
+ /* side-effect, don't write to CR3B-CR3F */
+ continue;
smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
+ }
/* init CRTC register CR90 - CRA7 */
for (i = 0; i < SIZE_CR90_CRA7; i++)
@@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
{
sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
+ if (sfb->chip_id == 0x720)
+ /* on SM720, the framebuffer starts 0x00200000 bytes (2 MiB) into the BAR */
+ sfb->fb->fix.smem_start += 0x00200000;
+
+ /* XXX: is it safe for SM720 on Big-Endian? */
if (sfb->fb->var.bits_per_pixel == 32)
sfb->fb->fix.smem_start += big_addr;
@@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
outb_p(0x11, 0x3c5);
}
+static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
+{
+ u8 vram;
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ /*
+ * Assume SM712 graphics chip has 4MB VRAM.
+ *
+ * FIXME: SM712 can have 2MB VRAM, which is used on earlier
+ * laptops, such as IBM Thinkpad 240X. This driver would
+ * probably crash on those machines. If anyone gets one of
+ * those and is willing to help, run "git blame" and send me
+ * an E-mail.
+ */
+ return 0x00400000;
+ case 0x720:
+ outb_p(0x76, 0x3c4);
+ vram = inb_p(0x3c5) >> 6;
+
+ if (vram == 0x00)
+ return 0x00800000; /* 8 MB */
+ else if (vram == 0x01)
+ return 0x01000000; /* 16 MB */
+ else if (vram == 0x02)
+ return 0x00400000; /* illegal, fallback to 4 MB */
+ else if (vram == 0x03)
+ return 0x00400000; /* 4 MB */
+ }
+ return 0; /* unknown hardware */
+}
+
+static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
+{
+ /* get mode parameter from smtc_scr_info */
+ if (smtc_scr_info.lfb_width != 0) {
+ sfb->fb->var.xres = smtc_scr_info.lfb_width;
+ sfb->fb->var.yres = smtc_scr_info.lfb_height;
+ sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+ goto final;
+ }
+
+ /*
+ * No parameter, default resolution is 1024x768-16.
+ *
+ * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an
+ * 800x600 panel; see also the comments about the Thinkpad 240X above.
+ */
+ sfb->fb->var.xres = SCREEN_X_RES;
+ sfb->fb->var.yres = SCREEN_Y_RES_PC;
+ sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+
+#ifdef CONFIG_MIPS
+ /*
+ * Loongson MIPS netbooks, the original target platform of this driver,
+ * use 1024x600 LCD panels, but nearly all old x86 laptops have 1024x768
+ * panels. Driving a 768-line panel with 600-line timings would partially
+ * garble the display, and there is no reliable way to tell the two
+ * apart.
+ *
+ * So the default becomes 768, while MIPS keeps 600 as before.
+ */
+ sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
+#endif
+
+final:
+ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+}
+
static int smtcfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct smtcfb_info *sfb;
struct fb_info *info;
- u_long smem_size = 0x00800000; /* default 8MB */
+ u_long smem_size;
int err;
unsigned long mmio_base;
@@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
sm7xx_init_hw();
- /* get mode parameter from smtc_scr_info */
- if (smtc_scr_info.lfb_width != 0) {
- sfb->fb->var.xres = smtc_scr_info.lfb_width;
- sfb->fb->var.yres = smtc_scr_info.lfb_height;
- sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
- } else {
- /* default resolution 1024x600 16bit mode */
- sfb->fb->var.xres = SCREEN_X_RES;
- sfb->fb->var.yres = SCREEN_Y_RES;
- sfb->fb->var.bits_per_pixel = SCREEN_BPP;
- }
-
- big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
/* Map address and memory detection */
mmio_base = pci_resource_start(pdev, 0);
pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
+ smem_size = sm7xx_vram_probe(sfb);
+ dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
+ smem_size / 1048576);
+
switch (sfb->chip_id) {
case 0x710:
case 0x712:
sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
sfb->fb->fix.mmio_len = 0x00400000;
- smem_size = SM712_VIDEOMEMORYSIZE;
sfb->lfb = ioremap(mmio_base, mmio_addr);
if (!sfb->lfb) {
dev_err(&pdev->dev,
@@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
case 0x720:
sfb->fb->fix.mmio_start = mmio_base;
sfb->fb->fix.mmio_len = 0x00200000;
- smem_size = SM722_VIDEOMEMORYSIZE;
- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+ sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
sfb->lfb = sfb->dp_regs + 0x00200000;
sfb->mmio = (smtc_regbaseaddress =
sfb->dp_regs + 0x000c0000);
@@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
goto failed_fb;
}
+ /* probe and decide resolution */
+ sm7xx_resolution_probe(sfb);
+
/* can support 32 bpp */
if (sfb->fb->var.bits_per_pixel == 15)
sfb->fb->var.bits_per_pixel = 16;
@@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
if (err)
goto failed;
- smtcfb_setmode(sfb);
+ /*
+ * The screen would be temporarily garbled when sm712fb takes over
+ * vesafb or VGA text mode. Zero the framebuffer.
+ */
+ memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
err = register_framebuffer(info);
if (err < 0)
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 1d034dddc556..5a0d6fb02bbc 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
return 0;
}
-static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
- int width, int height, char *data)
+static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
{
int i, ret;
char *cmd;
@@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
start_cycles = get_cycles();
+ mutex_lock(&dlfb->render_mutex);
+
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
if ((width <= 0) ||
(x + width > dlfb->info->var.xres) ||
- (y + height > dlfb->info->var.yres))
- return -EINVAL;
+ (y + height > dlfb->info->var.yres)) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
- if (!atomic_read(&dlfb->usb_active))
- return 0;
+ if (!atomic_read(&dlfb->usb_active)) {
+ ret = 0;
+ goto unlock_ret;
+ }
urb = dlfb_get_urb(dlfb);
- if (!urb)
- return 0;
+ if (!urb) {
+ ret = 0;
+ goto unlock_ret;
+ }
cmd = urb->transfer_buffer;
for (i = y; i < y + height ; i++) {
@@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
*cmd++ = 0xAF;
/* Send partial buffer remaining before exiting */
len = cmd - (char *) urb->transfer_buffer;
- ret = dlfb_submit_urb(dlfb, urb, len);
+ dlfb_submit_urb(dlfb, urb, len);
bytes_sent += len;
} else
dlfb_urb_completion(urb);
@@ -655,7 +662,55 @@ error:
>> 10)), /* Kcycles */
&dlfb->cpu_kcycles_used);
- return 0;
+ ret = 0;
+
+unlock_ret:
+ mutex_unlock(&dlfb->render_mutex);
+ return ret;
+}
+
+static void dlfb_init_damage(struct dlfb_data *dlfb)
+{
+ dlfb->damage_x = INT_MAX;
+ dlfb->damage_x2 = 0;
+ dlfb->damage_y = INT_MAX;
+ dlfb->damage_y2 = 0;
+}
+
+static void dlfb_damage_work(struct work_struct *w)
+{
+ struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
+ int x, x2, y, y2;
+
+ spin_lock_irq(&dlfb->damage_lock);
+ x = dlfb->damage_x;
+ x2 = dlfb->damage_x2;
+ y = dlfb->damage_y;
+ y2 = dlfb->damage_y2;
+ dlfb_init_damage(dlfb);
+ spin_unlock_irq(&dlfb->damage_lock);
+
+ if (x < x2 && y < y2)
+ dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
+}
+
+static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
+{
+ unsigned long flags;
+ int x2 = x + width;
+ int y2 = y + height;
+
+ if (x >= x2 || y >= y2)
+ return;
+
+ spin_lock_irqsave(&dlfb->damage_lock, flags);
+ dlfb->damage_x = min(x, dlfb->damage_x);
+ dlfb->damage_x2 = max(x2, dlfb->damage_x2);
+ dlfb->damage_y = min(y, dlfb->damage_y);
+ dlfb->damage_y2 = max(y2, dlfb->damage_y2);
+ spin_unlock_irqrestore(&dlfb->damage_lock, flags);
+
+ schedule_work(&dlfb->damage_work);
}
/*
@@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
(u32)info->var.yres);
dlfb_handle_damage(dlfb, 0, start, info->var.xres,
- lines, info->screen_base);
+ lines);
}
return result;
@@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
sys_copyarea(info, area);
- dlfb_handle_damage(dlfb, area->dx, area->dy,
- area->width, area->height, info->screen_base);
+ dlfb_offload_damage(dlfb, area->dx, area->dy,
+ area->width, area->height);
}
static void dlfb_ops_imageblit(struct fb_info *info,
@@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
sys_imageblit(info, image);
- dlfb_handle_damage(dlfb, image->dx, image->dy,
- image->width, image->height, info->screen_base);
+ dlfb_offload_damage(dlfb, image->dx, image->dy,
+ image->width, image->height);
}
static void dlfb_ops_fillrect(struct fb_info *info,
@@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
sys_fillrect(info, rect);
- dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
- rect->height, info->screen_base);
+ dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
+ rect->height);
}
/*
@@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
int bytes_identical = 0;
int bytes_rendered = 0;
+ mutex_lock(&dlfb->render_mutex);
+
if (!fb_defio)
- return;
+ goto unlock_ret;
if (!atomic_read(&dlfb->usb_active))
- return;
+ goto unlock_ret;
start_cycles = get_cycles();
urb = dlfb_get_urb(dlfb);
if (!urb)
- return;
+ goto unlock_ret;
cmd = urb->transfer_buffer;
@@ -782,6 +839,8 @@ error:
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
&dlfb->cpu_kcycles_used);
+unlock_ret:
+ mutex_unlock(&dlfb->render_mutex);
}
static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
@@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
if (area.y > info->var.yres)
area.y = info->var.yres;
- dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
- info->screen_base);
+ dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
}
return 0;
@@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
{
struct dlfb_data *dlfb = info->par;
+ cancel_work_sync(&dlfb->damage_work);
+
+ mutex_destroy(&dlfb->render_mutex);
+
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
@@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
pix_framebuffer[i] = 0x37e6;
}
- dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
- info->screen_base);
+ dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
return 0;
}
@@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
dlfb->ops = dlfb_ops;
info->fbops = &dlfb->ops;
+ mutex_init(&dlfb->render_mutex);
+ dlfb_init_damage(dlfb);
+ spin_lock_init(&dlfb->damage_lock);
+ INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
+
INIT_LIST_HEAD(&info->modelist);
if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 34dc8e53a1e9..d707fdb97354 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1543,7 +1543,7 @@ static void uvesafb_ioremap(struct fb_info *info)
static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
@@ -1554,7 +1554,7 @@ static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
static ssize_t uvesafb_show_vbe_modes(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
int ret = 0, i;
@@ -1573,7 +1573,7 @@ static DEVICE_ATTR(vbe_modes, S_IRUGO, uvesafb_show_vbe_modes, NULL);
static ssize_t uvesafb_show_vendor(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_vendor_name_ptr)
@@ -1588,7 +1588,7 @@ static DEVICE_ATTR(oem_vendor, S_IRUGO, uvesafb_show_vendor, NULL);
static ssize_t uvesafb_show_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_name_ptr)
@@ -1603,7 +1603,7 @@ static DEVICE_ATTR(oem_product_name, S_IRUGO, uvesafb_show_product_name, NULL);
static ssize_t uvesafb_show_product_rev(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_rev_ptr)
@@ -1618,7 +1618,7 @@ static DEVICE_ATTR(oem_product_rev, S_IRUGO, uvesafb_show_product_rev, NULL);
static ssize_t uvesafb_show_oem_string(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_string_ptr)
@@ -1633,7 +1633,7 @@ static DEVICE_ATTR(oem_string, S_IRUGO, uvesafb_show_oem_string, NULL);
static ssize_t uvesafb_show_nocrtc(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
@@ -1642,7 +1642,7 @@ static ssize_t uvesafb_show_nocrtc(struct device *dev,
static ssize_t uvesafb_store_nocrtc(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
+ struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
if (count > 0) {
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index 528fe917dd49..dc1f9cfb6e7e 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -336,8 +336,8 @@ static int vesafb_probe(struct platform_device *dev)
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
if (pmi_base[3]) {
printk(KERN_INFO "vesafb: pmi: ports = ");
- for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
- printk("%x ",pmi_base[i]);
+ for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
+ printk("%x ", pmi_base[i]);
printk("\n");
if (pmi_base[i] != 0xffff) {
/*
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 6a4bbc9e1fb0..a3d6b6db221b 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -677,7 +677,7 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 8ba726e600e9..93d5bebf9572 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -215,6 +215,9 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
* hypervisor.
*/
lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
+ if (param.count == 0 ||
+ param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
+ return -EINVAL;
num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Allocate the buffers we need */
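
The bound added above is exactly what keeps the page-count arithmetic from wrapping: a count is accepted only if count + lb_offset + PAGE_SIZE - 1 still fits in 64 bits. A standalone C sketch of the same check, assuming the usual 4 KiB pages (names and constants here are illustrative, not from the patch):

#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1ULL << EX_PAGE_SHIFT)

/* Mirrors the kernel check: reject 0 and any count that would overflow. */
static uint64_t ex_num_pages(uint64_t count, uint64_t lb_offset)
{
	assert(count != 0 &&
	       count <= UINT64_MAX - lb_offset - EX_PAGE_SIZE + 1);
	/* With the bound above, this sum cannot wrap around. */
	return (count + lb_offset + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
}
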
@@ -244,7 +247,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
- num_pages, param.source != -1, pages);
+ num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
if (num_pinned != num_pages) {
/* get_user_pages() failed */
@@ -331,8 +334,8 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
struct fsl_hv_ioctl_prop param;
char __user *upath, *upropname;
void __user *upropval;
- char *path = NULL, *propname = NULL;
- void *propval = NULL;
+ char *path, *propname;
+ void *propval;
int ret = 0;
/* Get the parameters from the user. */
@@ -344,32 +347,30 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
upropval = (void __user *)(uintptr_t)param.propval;
path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
- if (IS_ERR(path)) {
- ret = PTR_ERR(path);
- goto out;
- }
+ if (IS_ERR(path))
+ return PTR_ERR(path);
propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
if (IS_ERR(propname)) {
ret = PTR_ERR(propname);
- goto out;
+ goto err_free_path;
}
if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
ret = -EINVAL;
- goto out;
+ goto err_free_propname;
}
propval = kmalloc(param.proplen, GFP_KERNEL);
if (!propval) {
ret = -ENOMEM;
- goto out;
+ goto err_free_propname;
}
if (set) {
if (copy_from_user(propval, upropval, param.proplen)) {
ret = -EFAULT;
- goto out;
+ goto err_free_propval;
}
param.ret = fh_partition_set_dtprop(param.handle,
@@ -388,7 +389,7 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
if (copy_to_user(upropval, propval, param.proplen) ||
put_user(param.proplen, &p->proplen)) {
ret = -EFAULT;
- goto out;
+ goto err_free_propval;
}
}
}
@@ -396,10 +397,12 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
if (put_user(param.ret, &p->ret))
ret = -EFAULT;
-out:
- kfree(path);
+err_free_propval:
kfree(propval);
+err_free_propname:
kfree(propname);
+err_free_path:
+ kfree(path);
return ret;
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5df92c308286..0a7b3ce3fb75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1004,6 +1004,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
@@ -1718,10 +1719,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/**
* virtqueue_add_sgs - expose buffers to other end
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @sgs: array of terminated scatterlists.
- * @out_num: the number of scatterlists readable by other side
- * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
@@ -1821,7 +1822,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
*
* Instead of virtqueue_kick(), you can do:
* if (virtqueue_kick_prepare(vq))
@@ -1841,7 +1842,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
* virtqueue_notify - second half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
*
* This does not need to be serialized.
*
@@ -1885,8 +1886,9 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
* virtqueue_get_buf - get the next used buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
+ * @ctx: extra context for the token
*
* If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
@@ -1916,7 +1918,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
* virtqueue_disable_cb - disable callbacks
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
@@ -1936,7 +1938,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
* virtqueue_enable_cb_prepare - restart callbacks after disable_cb
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns current queue state
* in an opaque unsigned value. This value should be later tested by
@@ -1957,7 +1959,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
* virtqueue_poll - query pending used buffers
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
*
* Returns "true" if there are pending used buffers in the queue.
@@ -1976,7 +1978,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
/**
* virtqueue_enable_cb - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
@@ -1995,7 +1997,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
* virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
@@ -2017,7 +2019,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
* virtqueue_detach_unused_buf - detach first unused buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
* This is not valid on an active queue; it is useful only for device
@@ -2249,7 +2251,7 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
/**
* virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
+ * @_vq: the struct virtqueue containing the vring of interest.
*
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 242eea859637..7ea60371bda0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -30,7 +30,7 @@ menuconfig WATCHDOG
if WATCHDOG
config WATCHDOG_CORE
- bool "WatchDog Timer Driver Core"
+ tristate "WatchDog Timer Driver Core"
---help---
Say Y here if you want to use the new watchdog timer driver core.
This driver provides a framework for all watchdog timer drivers
@@ -63,6 +63,66 @@ config WATCHDOG_SYSFS
Say Y here if you want to enable watchdog device status read through
sysfs attributes.
+comment "Watchdog Pretimeout Governors"
+
+config WATCHDOG_PRETIMEOUT_GOV
+ bool "Enable watchdog pretimeout governors"
+ depends on WATCHDOG_CORE
+ help
+ This option allows the selection of watchdog pretimeout governors.
+
+config WATCHDOG_PRETIMEOUT_GOV_SEL
+ tristate
+ depends on WATCHDOG_PRETIMEOUT_GOV
+ default m
+ select WATCHDOG_PRETIMEOUT_GOV_PANIC if WATCHDOG_PRETIMEOUT_GOV_NOOP=n
+
+if WATCHDOG_PRETIMEOUT_GOV
+
+config WATCHDOG_PRETIMEOUT_GOV_NOOP
+ tristate "Noop watchdog pretimeout governor"
+ depends on WATCHDOG_CORE
+ default WATCHDOG_CORE
+ help
+ Noop watchdog pretimeout governor: on a pretimeout event, only an
+ informational message is added to the kernel log buffer.
+
+config WATCHDOG_PRETIMEOUT_GOV_PANIC
+ tristate "Panic watchdog pretimeout governor"
+ depends on WATCHDOG_CORE
+ default WATCHDOG_CORE
+ help
+ Panic watchdog pretimeout governor: on a watchdog pretimeout
+ event, the kernel is put into panic.
+
+choice
+ prompt "Default Watchdog Pretimeout Governor"
+ default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+ help
+ This option selects a default watchdog pretimeout governor.
+ The governor takes its action if the watchdog is capable of
+ reporting a pretimeout event.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP
+ bool "noop"
+ depends on WATCHDOG_PRETIMEOUT_GOV_NOOP
+ help
+ Use the noop watchdog pretimeout governor by default. If the noop
+ governor is selected, a short message is written to the kernel log
+ buffer and no system changes are made.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+ bool "panic"
+ depends on WATCHDOG_PRETIMEOUT_GOV_PANIC
+ help
+ Use the panic watchdog pretimeout governor by default: if a
+ watchdog pretimeout event happens, the watchdog feeder is
+ considered dead and a reboot is unavoidable.
+
+endchoice
+
+endif # WATCHDOG_PRETIMEOUT_GOV
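
For context on how these governors are invoked, here is a minimal sketch (not from this patch) of a driver-side pretimeout interrupt handler that hands the event to whichever governor is selected above; the IRQ wiring is assumed and driver-specific:

#include <linux/interrupt.h>
#include <linux/watchdog.h>

static irqreturn_t example_wdt_pretimeout_irq(int irq, void *dev_id)
{
	struct watchdog_device *wdd = dev_id;

	/* Let the selected pretimeout governor (noop/panic) act. */
	watchdog_notify_pretimeout(wdd);
	return IRQ_HANDLED;
}
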
+
#
# General Watchdog drivers
#
@@ -90,6 +150,18 @@ config SOFT_WATCHDOG_PRETIMEOUT
watchdog. Be aware that governors might affect the watchdog because it
is purely software, e.g. the panic governor will stall it!
+config BD70528_WATCHDOG
+ tristate "ROHM BD70528 PMIC Watchdog"
+ depends on MFD_ROHM_BD70528
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog in the ROHM BD70528 PMIC. A watchdog
+ trigger causes a system reset.
+
+ Say Y here to include support for the ROHM BD70528 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called bd70528_wdt.
+
config DA9052_WATCHDOG
tristate "Dialog DA9052 Watchdog"
depends on PMIC_DA9052 || COMPILE_TEST
@@ -552,7 +624,7 @@ config COH901327_WATCHDOG
compiled as a module.
config NPCM7XX_WATCHDOG
- bool "Nuvoton NPCM750 watchdog"
+ tristate "Nuvoton NPCM750 watchdog"
depends on ARCH_NPCM || COMPILE_TEST
default y if ARCH_NPCM7XX
select WATCHDOG_CORE
@@ -641,6 +713,22 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config IMX_SC_WDT
+ tristate "IMX SC Watchdog"
+ depends on HAVE_ARM_SMCCC
+ select WATCHDOG_CORE
+ help
+ This is the driver for the system controller watchdog on
+ NXP i.MX SoCs that contain a system controller. The watchdog
+ driver calls the ARM SMC API and traps into ARM Trusted
+ Firmware for its operations; ARM Trusted Firmware then asks
+ the system controller to carry them out.
+ If you have one of these processors and wish to have
+ watchdog support enabled, say Y, otherwise say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx_sc_wdt.
+
config UX500_WATCHDOG
tristate "ST-Ericsson Ux500 watchdog"
depends on MFD_DB8500_PRCMU
@@ -1179,6 +1267,15 @@ config HP_WATCHDOG
To compile this driver as a module, choose M here: the module will be
called hpwdt.
+config HPWDT_NMI_DECODING
+ bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
+ depends on HP_WATCHDOG
+ default y
+ help
+ Enables the NMI handler for the watchdog pretimeout NMI and the iLO
+ "Generate NMI to System" virtual button. When an NMI is claimed
+ by the driver, panic is called.
+
config KEMPLD_WDT
tristate "Kontron COM Watchdog Timer"
depends on MFD_KEMPLD
@@ -1190,15 +1287,6 @@ config KEMPLD_WDT
This driver can also be built as a module. If so, the module will be
called kempld_wdt.
-config HPWDT_NMI_DECODING
- bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
- depends on HP_WATCHDOG
- default y
- help
- Enables the NMI handler for the watchdog pretimeout NMI and the iLO
- "Generate NMI to System" virtual button. When an NMI is claimed
- by the driver, panic is called.
-
config SC1200_WDT
tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
depends on X86
@@ -1647,7 +1735,7 @@ config BCM_KONA_WDT
config BCM_KONA_WDT_DEBUG
bool "DEBUGFS support for BCM Kona Watchdog"
- depends on BCM_KONA_WDT || COMPILE_TEST
+ depends on BCM_KONA_WDT
help
If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
access to the driver's internal data structures as well as watchdog
@@ -2024,53 +2112,4 @@ config USBPCWATCHDOG
Most people will say N.
-comment "Watchdog Pretimeout Governors"
-
-config WATCHDOG_PRETIMEOUT_GOV
- bool "Enable watchdog pretimeout governors"
- help
- The option allows to select watchdog pretimeout governors.
-
-if WATCHDOG_PRETIMEOUT_GOV
-
-choice
- prompt "Default Watchdog Pretimeout Governor"
- default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
- help
- This option selects a default watchdog pretimeout governor.
- The governor takes its action, if a watchdog is capable
- to report a pretimeout event.
-
-config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP
- bool "noop"
- select WATCHDOG_PRETIMEOUT_GOV_NOOP
- help
- Use noop watchdog pretimeout governor by default. If noop
- governor is selected by a user, write a short message to
- the kernel log buffer and don't do any system changes.
-
-config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
- bool "panic"
- select WATCHDOG_PRETIMEOUT_GOV_PANIC
- help
- Use panic watchdog pretimeout governor by default, if
- a watchdog pretimeout event happens, consider that
- a watchdog feeder is dead and reboot is unavoidable.
-
-endchoice
-
-config WATCHDOG_PRETIMEOUT_GOV_NOOP
- tristate "Noop watchdog pretimeout governor"
- help
- Noop watchdog pretimeout governor, only an informational
- message is added to kernel log buffer.
-
-config WATCHDOG_PRETIMEOUT_GOV_PANIC
- tristate "Panic watchdog pretimeout governor"
- help
- Panic watchdog pretimeout governor, on watchdog pretimeout
- event put the kernel into panic.
-
-endif # WATCHDOG_PRETIMEOUT_GOV
-
endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ba930e464657..7caa920e7e60 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS4800_WATCHDOG) += ts4800_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o
obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
@@ -205,6 +206,7 @@ obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
+obj-$(CONFIG_BD70528_WATCHDOG) += bd70528_wdt.o
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 7e9884960eb9..689b8a0593c1 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -277,8 +277,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
timeout = new_timeout;
wdt_keepalive();
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 4b4054f54df9..e5dcb26d85f0 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -244,6 +244,11 @@ static const struct watchdog_ops armada_37xx_wdt_ops = {
.get_timeleft = armada_37xx_wdt_get_timeleft,
};
+static void armada_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int armada_37xx_wdt_probe(struct platform_device *pdev)
{
struct armada_37xx_watchdog *dev;
@@ -278,12 +283,14 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
ret = clk_prepare_enable(dev->clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev,
+ armada_clk_disable_unprepare, dev->clk);
+ if (ret)
+ return ret;
dev->clk_rate = clk_get_rate(dev->clk);
- if (!dev->clk_rate) {
- ret = -EINVAL;
- goto disable_clk;
- }
+ if (!dev->clk_rate)
+ return -EINVAL;
/*
* Since the timeout in seconds is given as 32 bit unsigned int, and
@@ -307,35 +314,15 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
watchdog_set_nowayout(&dev->wdt, nowayout);
- ret = watchdog_register_device(&dev->wdt);
+ watchdog_stop_on_reboot(&dev->wdt);
+ ret = devm_watchdog_register_device(&pdev->dev, &dev->wdt);
if (ret)
- goto disable_clk;
+ return ret;
dev_info(&pdev->dev, "Initial timeout %d sec%s\n",
dev->wdt.timeout, nowayout ? ", nowayout" : "");
return 0;
-
-disable_clk:
- clk_disable_unprepare(dev->clk);
- return ret;
-}
-
-static int armada_37xx_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdt = platform_get_drvdata(pdev);
- struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
-
- watchdog_unregister_device(wdt);
- clk_disable_unprepare(dev->clk);
- return 0;
-}
-
-static void armada_37xx_wdt_shutdown(struct platform_device *pdev)
-{
- struct watchdog_device *wdt = platform_get_drvdata(pdev);
-
- armada_37xx_wdt_stop(wdt);
}
static int __maybe_unused armada_37xx_wdt_suspend(struct device *dev)
@@ -370,8 +357,6 @@ MODULE_DEVICE_TABLE(of, armada_37xx_wdt_match);
static struct platform_driver armada_37xx_wdt_driver = {
.probe = armada_37xx_wdt_probe,
- .remove = armada_37xx_wdt_remove,
- .shutdown = armada_37xx_wdt_shutdown,
.driver = {
.name = "armada_37xx_wdt",
.of_match_table = of_match_ptr(armada_37xx_wdt_match),
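
The armada_37xx conversion above, like several of the hunks that follow (asm9260, atlas7, bcm7038, cadence), switches to the same devm pattern: register a clock-disable action right after enabling the clock, so the explicit error paths and the remove/shutdown callbacks can be dropped. A minimal sketch of the pattern, with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>

static void example_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;
	/* Undone automatically on probe failure and on device removal. */
	return devm_add_action_or_reset(dev, example_clk_disable_unprepare,
					clk);
}
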
diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
index 9768e44ffeb8..c5b9aae544dd 100644
--- a/drivers/watchdog/asm9260_wdt.c
+++ b/drivers/watchdog/asm9260_wdt.c
@@ -196,6 +196,11 @@ static const struct watchdog_ops asm9260_wdt_ops = {
.restart = asm9260_restart,
};
+static void asm9260_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int asm9260_wdt_get_dt_clks(struct asm9260_wdt_priv *priv)
{
int err;
@@ -219,26 +224,32 @@ static int asm9260_wdt_get_dt_clks(struct asm9260_wdt_priv *priv)
dev_err(priv->dev, "Failed to enable ahb_clk!\n");
return err;
}
+ err = devm_add_action_or_reset(priv->dev,
+ asm9260_clk_disable_unprepare,
+ priv->clk_ahb);
+ if (err)
+ return err;
err = clk_set_rate(priv->clk, CLOCK_FREQ);
if (err) {
- clk_disable_unprepare(priv->clk_ahb);
dev_err(priv->dev, "Failed to set rate!\n");
return err;
}
err = clk_prepare_enable(priv->clk);
if (err) {
- clk_disable_unprepare(priv->clk_ahb);
dev_err(priv->dev, "Failed to enable clk!\n");
return err;
}
+ err = devm_add_action_or_reset(priv->dev,
+ asm9260_clk_disable_unprepare,
+ priv->clk);
+ if (err)
+ return err;
/* wdt has internal divider */
clk = clk_get_rate(priv->clk);
if (!clk) {
- clk_disable_unprepare(priv->clk);
- clk_disable_unprepare(priv->clk_ahb);
dev_err(priv->dev, "Failed, clk is 0!\n");
return -EINVAL;
}
@@ -274,25 +285,23 @@ static void asm9260_wdt_get_dt_mode(struct asm9260_wdt_priv *priv)
static int asm9260_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct asm9260_wdt_priv *priv;
struct watchdog_device *wdd;
- struct resource *res;
int ret;
static const char * const mode_name[] = { "hw", "sw", "debug", };
- priv = devm_kzalloc(&pdev->dev, sizeof(struct asm9260_wdt_priv),
- GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct asm9260_wdt_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->dev = &pdev->dev;
+ priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_ioremap_resource(&pdev->dev, res);
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
- priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
+ priv->rst = devm_reset_control_get_exclusive(dev, "wdt_rst");
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
@@ -305,7 +314,7 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
wdd->ops = &asm9260_wdt_ops;
wdd->min_timeout = 1;
wdd->max_timeout = BM_WDTC_MAX(priv->wdt_freq);
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_drvdata(wdd, priv);
@@ -315,7 +324,7 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
* the max instead.
*/
wdd->timeout = ASM9260_WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(wdd, 0, &pdev->dev);
+ watchdog_init_timeout(wdd, 0, dev);
asm9260_wdt_get_dt_mode(priv);
@@ -327,49 +336,25 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
* Not all supported platforms specify an interrupt for the
* watchdog, so let's make it optional.
*/
- ret = devm_request_irq(&pdev->dev, priv->irq,
- asm9260_wdt_irq, 0, pdev->name, priv);
+ ret = devm_request_irq(dev, priv->irq, asm9260_wdt_irq, 0,
+ pdev->name, priv);
if (ret < 0)
- dev_warn(&pdev->dev, "failed to request IRQ\n");
+ dev_warn(dev, "failed to request IRQ\n");
}
watchdog_set_restart_priority(wdd, 128);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
- goto clk_off;
+ return ret;
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Watchdog enabled (timeout: %d sec, mode: %s)\n",
+ dev_info(dev, "Watchdog enabled (timeout: %d sec, mode: %s)\n",
wdd->timeout, mode_name[priv->mode]);
return 0;
-
-clk_off:
- clk_disable_unprepare(priv->clk);
- clk_disable_unprepare(priv->clk_ahb);
- return ret;
-}
-
-static void asm9260_wdt_shutdown(struct platform_device *pdev)
-{
- struct asm9260_wdt_priv *priv = platform_get_drvdata(pdev);
-
- asm9260_wdt_disable(&priv->wdd);
-}
-
-static int asm9260_wdt_remove(struct platform_device *pdev)
-{
- struct asm9260_wdt_priv *priv = platform_get_drvdata(pdev);
-
- asm9260_wdt_disable(&priv->wdd);
-
- watchdog_unregister_device(&priv->wdd);
-
- clk_disable_unprepare(priv->clk);
- clk_disable_unprepare(priv->clk_ahb);
-
- return 0;
}
static const struct of_device_id asm9260_wdt_of_match[] = {
@@ -384,8 +369,6 @@ static struct platform_driver asm9260_wdt_driver = {
.of_match_table = asm9260_wdt_of_match,
},
.probe = asm9260_wdt_probe,
- .remove = asm9260_wdt_remove,
- .shutdown = asm9260_wdt_shutdown,
};
module_platform_driver(asm9260_wdt_driver);
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 1abe4d021fd2..34117745c65f 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -187,22 +187,21 @@ static const struct watchdog_info aspeed_wdt_info = {
static int aspeed_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct aspeed_wdt_config *config;
const struct of_device_id *ofdid;
struct aspeed_wdt *wdt;
- struct resource *res;
struct device_node *np;
const char *reset_type;
u32 duration;
u32 status;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -214,12 +213,12 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
wdt->wdd.info = &aspeed_wdt_info;
wdt->wdd.ops = &aspeed_wdt_ops;
wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
- np = pdev->dev.of_node;
+ np = dev->of_node;
ofdid = of_match_node(aspeed_wdt_of_table, np);
if (!ofdid)
@@ -288,11 +287,11 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
u32 max_duration = config->ext_pulse_width_mask + 1;
if (duration == 0 || duration > max_duration) {
- dev_err(&pdev->dev, "Invalid pulse duration: %uus\n",
- duration);
+ dev_err(dev, "Invalid pulse duration: %uus\n",
+ duration);
duration = max(1U, min(max_duration, duration));
- dev_info(&pdev->dev, "Pulse duration set to %uus\n",
- duration);
+ dev_info(dev, "Pulse duration set to %uus\n",
+ duration);
}
/*
@@ -314,9 +313,9 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register\n");
+ dev_err(dev, "failed to register\n");
return ret;
}
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index f4050a229eb5..292b5a1ca831 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -327,7 +327,6 @@ static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
static int __init at91wdt_probe(struct platform_device *pdev)
{
- struct resource *r;
int err;
struct at91wdt *wdt;
@@ -346,8 +345,7 @@ static int __init at91wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0xFFFF;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, r);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 4f56b63f9691..02234c254b10 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -250,15 +250,13 @@ static struct miscdevice ath79_wdt_miscdev = {
static int ath79_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
u32 ctrl;
int err;
if (wdt_base)
return -EBUSY;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
diff --git a/drivers/watchdog/atlas7_wdt.c b/drivers/watchdog/atlas7_wdt.c
index 4abdcabd8219..79337d2a8a8e 100644
--- a/drivers/watchdog/atlas7_wdt.c
+++ b/drivers/watchdog/atlas7_wdt.c
@@ -125,80 +125,57 @@ static const struct of_device_id atlas7_wdt_ids[] = {
{}
};
+static void atlas7_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int atlas7_wdt_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct atlas7_wdog *wdt;
- struct resource *res;
struct clk *clk;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- clk = of_clk_get(np, 0);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "clk enable failed\n");
- goto err;
+ dev_err(dev, "clk enable failed\n");
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, atlas7_clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
/* disable watchdog hardware */
writel(0, wdt->base + ATLAS7_WDT_CNT_CTRL);
wdt->tick_rate = clk_get_rate(clk);
- if (!wdt->tick_rate) {
- ret = -EINVAL;
- goto err1;
- }
+ if (!wdt->tick_rate)
+ return -EINVAL;
wdt->clk = clk;
atlas7_wdd.min_timeout = 1;
atlas7_wdd.max_timeout = UINT_MAX / wdt->tick_rate;
- watchdog_init_timeout(&atlas7_wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&atlas7_wdd, 0, dev);
watchdog_set_nowayout(&atlas7_wdd, nowayout);
watchdog_set_drvdata(&atlas7_wdd, wdt);
platform_set_drvdata(pdev, &atlas7_wdd);
- ret = watchdog_register_device(&atlas7_wdd);
- if (ret)
- goto err1;
-
- return 0;
-
-err1:
- clk_disable_unprepare(clk);
-err:
- clk_put(clk);
- return ret;
-}
-
-static void atlas7_wdt_shutdown(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
-
- atlas7_wdt_disable(wdd);
- clk_disable_unprepare(wdt->clk);
-}
-
-static int atlas7_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct atlas7_wdog *wdt = watchdog_get_drvdata(wdd);
-
- atlas7_wdt_shutdown(pdev);
- clk_put(wdt->clk);
- return 0;
+ watchdog_stop_on_reboot(&atlas7_wdd);
+ watchdog_stop_on_unregister(&atlas7_wdd);
+ return devm_watchdog_register_device(dev, &atlas7_wdd);
}
static int __maybe_unused atlas7_wdt_suspend(struct device *dev)
@@ -236,8 +213,6 @@ static struct platform_driver atlas7_wdt_driver = {
.of_match_table = atlas7_wdt_ids,
},
.probe = atlas7_wdt_probe,
- .remove = atlas7_wdt_remove,
- .shutdown = atlas7_wdt_shutdown,
};
module_platform_driver(atlas7_wdt_driver);
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 1834524ae373..560c1c54c177 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -177,7 +177,6 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
wdt = devm_kzalloc(dev, sizeof(struct bcm2835_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- platform_set_drvdata(pdev, wdt);
spin_lock_init(&wdt->lock);
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index ce3f646e8077..d3d88f6703d7 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -107,11 +107,15 @@ static const struct watchdog_ops bcm7038_wdt_ops = {
.get_timeleft = bcm7038_wdt_get_timeleft,
};
+static void bcm7038_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int bcm7038_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm7038_watchdog *wdt;
- struct resource *res;
int err;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -120,8 +124,7 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -131,6 +134,11 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
err = clk_prepare_enable(wdt->clk);
if (err)
return err;
+ err = devm_add_action_or_reset(dev,
+ bcm7038_clk_disable_unprepare,
+ wdt->clk);
+ if (err)
+ return err;
wdt->rate = clk_get_rate(wdt->clk);
/* Prevent divide-by-zero exception */
if (!wdt->rate)
@@ -148,10 +156,11 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
wdt->wdd.parent = dev;
watchdog_set_drvdata(&wdt->wdd, wdt);
- err = watchdog_register_device(&wdt->wdd);
+ watchdog_stop_on_reboot(&wdt->wdd);
+ watchdog_stop_on_unregister(&wdt->wdd);
+ err = devm_watchdog_register_device(dev, &wdt->wdd);
if (err) {
dev_err(dev, "Failed to register watchdog device\n");
- clk_disable_unprepare(wdt->clk);
return err;
}
@@ -160,19 +169,6 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int bcm7038_wdt_remove(struct platform_device *pdev)
-{
- struct bcm7038_watchdog *wdt = platform_get_drvdata(pdev);
-
- if (!nowayout)
- bcm7038_wdt_stop(&wdt->wdd);
-
- watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int bcm7038_wdt_suspend(struct device *dev)
{
@@ -198,14 +194,6 @@ static int bcm7038_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
bcm7038_wdt_resume);
-static void bcm7038_wdt_shutdown(struct platform_device *pdev)
-{
- struct bcm7038_watchdog *wdt = platform_get_drvdata(pdev);
-
- if (watchdog_active(&wdt->wdd))
- bcm7038_wdt_stop(&wdt->wdd);
-}
-
static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm7038-wdt" },
{},
@@ -214,8 +202,6 @@ MODULE_DEVICE_TABLE(of, bcm7038_wdt_match);
static struct platform_driver bcm7038_wdt_driver = {
.probe = bcm7038_wdt_probe,
- .remove = bcm7038_wdt_remove,
- .shutdown = bcm7038_wdt_shutdown,
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 4249b47902bd..e2ad44816359 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -271,16 +271,10 @@ static struct watchdog_device bcm_kona_wdt_wdd = {
.timeout = SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
};
-static void bcm_kona_wdt_shutdown(struct platform_device *pdev)
-{
- bcm_kona_wdt_stop(&bcm_kona_wdt_wdd);
-}
-
static int bcm_kona_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm_kona_wdt *wdt;
- struct resource *res;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -289,8 +283,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return -ENODEV;
@@ -303,7 +296,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
- bcm_kona_wdt_wdd.parent = &pdev->dev;
+ bcm_kona_wdt_wdd.parent = dev;
ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
if (ret) {
@@ -311,7 +304,9 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
return ret;
}
- ret = watchdog_register_device(&bcm_kona_wdt_wdd);
+ watchdog_stop_on_reboot(&bcm_kona_wdt_wdd);
+ watchdog_stop_on_unregister(&bcm_kona_wdt_wdd);
+ ret = devm_watchdog_register_device(dev, &bcm_kona_wdt_wdd);
if (ret) {
dev_err(dev, "Failed to register watchdog device");
return ret;
@@ -326,8 +321,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
static int bcm_kona_wdt_remove(struct platform_device *pdev)
{
bcm_kona_wdt_debug_exit(pdev);
- bcm_kona_wdt_shutdown(pdev);
- watchdog_unregister_device(&bcm_kona_wdt_wdd);
dev_dbg(&pdev->dev, "Watchdog driver disabled");
return 0;
@@ -346,7 +339,6 @@ static struct platform_driver bcm_kona_wdt_driver = {
},
.probe = bcm_kona_wdt_probe,
.remove = bcm_kona_wdt_remove,
- .shutdown = bcm_kona_wdt_shutdown,
};
module_platform_driver(bcm_kona_wdt_driver);
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
new file mode 100644
index 000000000000..b0152fef4fc7
--- /dev/null
+++ b/drivers/watchdog/bd70528_wdt.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+// ROHM BD70528MWV watchdog driver
+
+#include <linux/bcd.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd70528.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/watchdog.h>
+
+/*
+ * The maximum time we can set is 1 hour, 59 minutes and 59 seconds,
+ * and the minimum time is 1 second.
+ */
+#define WDT_MAX_MS ((2 * 60 * 60 - 1) * 1000)
+#define WDT_MIN_MS 1000
+#define DEFAULT_TIMEOUT 60
+
+#define WD_CTRL_MAGIC1 0x55
+#define WD_CTRL_MAGIC2 0xAA
+
+struct wdtbd70528 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct rohm_regmap_dev *mfd;
+ struct watchdog_device wdt;
+};
+
+/**
+ * bd70528_wdt_set - arm or disarm watchdog timer
+ *
+ * @data: device data for the PMIC instance we want to operate on
+ * @enable: new state of the WDT; zero to disable, non-zero to enable
+ * @old_state: the previous state of the WDT is stored here
+ *
+ * Arm or disarm WDT on BD70528 PMIC. Expected to be called only by
+ * BD70528 RTC and BD70528 WDT drivers. The rtc_timer_lock must be taken
+ * by calling bd70528_wdt_lock before calling bd70528_wdt_set.
+ */
+int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state)
+{
+ int ret, i;
+ unsigned int tmp;
+ struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
+ chip);
+ u8 wd_ctrl_arr[3] = { WD_CTRL_MAGIC1, WD_CTRL_MAGIC2, 0 };
+ u8 *wd_ctrl = &wd_ctrl_arr[2];
+
+ ret = regmap_read(bd70528->chip.regmap, BD70528_REG_WDT_CTRL, &tmp);
+ if (ret)
+ return ret;
+
+ *wd_ctrl = (u8)tmp;
+
+ if (old_state) {
+ if (*wd_ctrl & BD70528_MASK_WDT_EN)
+ *old_state |= BD70528_WDT_STATE_BIT;
+ else
+ *old_state &= ~BD70528_WDT_STATE_BIT;
+ if ((!enable) == (!(*old_state & BD70528_WDT_STATE_BIT)))
+ return 0;
+ }
+
+ if (enable) {
+ if (*wd_ctrl & BD70528_MASK_WDT_EN)
+ return 0;
+ *wd_ctrl |= BD70528_MASK_WDT_EN;
+ } else {
+ if (*wd_ctrl & BD70528_MASK_WDT_EN)
+ *wd_ctrl &= ~BD70528_MASK_WDT_EN;
+ else
+ return 0;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(bd70528->chip.regmap, BD70528_REG_WDT_CTRL,
+ wd_ctrl_arr[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(bd70528->chip.regmap, BD70528_REG_WDT_CTRL, &tmp);
+ if ((tmp & BD70528_MASK_WDT_EN) != (*wd_ctrl & BD70528_MASK_WDT_EN)) {
+ dev_err(bd70528->chip.dev,
+ "Watchdog ctrl mismatch (hw) 0x%x (set) 0x%x\n",
+ tmp, *wd_ctrl);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(bd70528_wdt_set);
+
+/**
+ * bd70528_wdt_lock - take WDT lock
+ *
+ * @data: device data for the PMIC instance we want to operate on
+ *
+ * Lock the WDT for arming/disarming in order to avoid a race condition
+ * caused by WDT state changes initiated by the WDT and RTC drivers.
+ */
+void bd70528_wdt_lock(struct rohm_regmap_dev *data)
+{
+ struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
+ chip);
+
+ mutex_lock(&bd70528->rtc_timer_lock);
+}
+EXPORT_SYMBOL(bd70528_wdt_lock);
+
+/**
+ * bd70528_wdt_unlock - unlock WDT lock
+ *
+ * @data: device data for the PMIC instance we want to operate on
+ *
+ * Release the WDT lock which has previously been taken by a call to
+ * bd70528_wdt_lock.
+ */
+void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
+{
+ struct bd70528_data *bd70528 = container_of(data, struct bd70528_data,
+ chip);
+
+ mutex_unlock(&bd70528->rtc_timer_lock);
+}
+EXPORT_SYMBOL(bd70528_wdt_unlock);
+
+static int bd70528_wdt_set_locked(struct wdtbd70528 *w, int enable)
+{
+ return bd70528_wdt_set(w->mfd, enable, NULL);
+}
+
+static int bd70528_wdt_change(struct wdtbd70528 *w, int enable)
+{
+ int ret;
+
+ bd70528_wdt_lock(w->mfd);
+ ret = bd70528_wdt_set_locked(w, enable);
+ bd70528_wdt_unlock(w->mfd);
+
+ return ret;
+}
+
+static int bd70528_wdt_start(struct watchdog_device *wdt)
+{
+ struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
+
+ dev_dbg(w->dev, "WDT ping...\n");
+ return bd70528_wdt_change(w, 1);
+}
+
+static int bd70528_wdt_stop(struct watchdog_device *wdt)
+{
+ struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
+
+ dev_dbg(w->dev, "WDT stopping...\n");
+ return bd70528_wdt_change(w, 0);
+}
+
+static int bd70528_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
+{
+ unsigned int hours;
+ unsigned int minutes;
+ unsigned int seconds;
+ int ret;
+ struct wdtbd70528 *w = watchdog_get_drvdata(wdt);
+
+ seconds = timeout;
+ hours = timeout / (60 * 60);
+ /* Maximum timeout is 1h 59m 59s => hours is 1 or 0 */
+ if (hours)
+ seconds -= (60 * 60);
+ minutes = seconds / 60;
+ seconds = seconds % 60;
+
+ bd70528_wdt_lock(w->mfd);
+
+ ret = bd70528_wdt_set_locked(w, 0);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_HOUR,
+ BD70528_MASK_WDT_HOUR, hours);
+ if (ret) {
+ dev_err(w->dev, "Failed to set WDT hours\n");
+ goto out_en_unlock;
+ }
+ ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_MINUTE,
+ BD70528_MASK_WDT_MINUTE, bin2bcd(minutes));
+ if (ret) {
+ dev_err(w->dev, "Failed to set WDT minutes\n");
+ goto out_en_unlock;
+ }
+ ret = regmap_update_bits(w->regmap, BD70528_REG_WDT_SEC,
+ BD70528_MASK_WDT_SEC, bin2bcd(seconds));
+ if (ret)
+ dev_err(w->dev, "Failed to set WDT seconds\n");
+ else
+ dev_dbg(w->dev, "WDT tmo set to %u\n", timeout);
+
+out_en_unlock:
+ ret = bd70528_wdt_set_locked(w, 1);
+out_unlock:
+ bd70528_wdt_unlock(w->mfd);
+
+ return ret;
+}
+
+static const struct watchdog_info bd70528_wdt_info = {
+ .identity = "bd70528-wdt",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops bd70528_wdt_ops = {
+ .start = bd70528_wdt_start,
+ .stop = bd70528_wdt_stop,
+ .set_timeout = bd70528_wdt_set_timeout,
+};
+
+static int bd70528_wdt_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd70528;
+ struct wdtbd70528 *w;
+ int ret;
+ unsigned int reg;
+
+ bd70528 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd70528) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+ w = devm_kzalloc(&pdev->dev, sizeof(*w), GFP_KERNEL);
+ if (!w)
+ return -ENOMEM;
+
+ w->regmap = bd70528->regmap;
+ w->mfd = bd70528;
+ w->dev = &pdev->dev;
+
+ w->wdt.info = &bd70528_wdt_info;
+ w->wdt.ops = &bd70528_wdt_ops;
+ w->wdt.min_hw_heartbeat_ms = WDT_MIN_MS;
+ w->wdt.max_hw_heartbeat_ms = WDT_MAX_MS;
+ w->wdt.parent = pdev->dev.parent;
+ w->wdt.timeout = DEFAULT_TIMEOUT;
+ watchdog_set_drvdata(&w->wdt, w);
+ watchdog_init_timeout(&w->wdt, 0, pdev->dev.parent);
+
+ ret = bd70528_wdt_set_timeout(&w->wdt, w->wdt.timeout);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the watchdog timeout\n");
+ return ret;
+ }
+
+ bd70528_wdt_lock(w->mfd);
+ ret = regmap_read(w->regmap, BD70528_REG_WDT_CTRL, &reg);
+ bd70528_wdt_unlock(w->mfd);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get the watchdog state\n");
+ return ret;
+ }
+ if (reg & BD70528_MASK_WDT_EN) {
+ dev_dbg(&pdev->dev, "watchdog was running during probe\n");
+ set_bit(WDOG_HW_RUNNING, &w->wdt.status);
+ }
+
+ ret = devm_watchdog_register_device(&pdev->dev, &w->wdt);
+ if (ret < 0)
+ dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver bd70528_wdt = {
+ .driver = {
+ .name = "bd70528-wdt"
+ },
+ .probe = bd70528_wdt_probe,
+};
+
+module_platform_driver(bd70528_wdt);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD70528 watchdog driver");
+MODULE_LICENSE("GPL");
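
Given the locking contract documented above for bd70528_wdt_set(), a caller outside this file (for example the RTC driver) is expected to bracket its call with the lock helpers. A hedged sketch of that usage, assuming the helpers are declared in the MFD header as the driver implies, with the surrounding RTC logic omitted:

#include <linux/mfd/rohm-bd70528.h>

static int example_pause_wdt_for_rtc_update(struct rohm_regmap_dev *mfd)
{
	int old_state = 0;
	int ret;

	bd70528_wdt_lock(mfd);
	ret = bd70528_wdt_set(mfd, 0, &old_state);	/* disarm, remember state */
	if (ret)
		goto out;

	/* ... update the RTC counter here ... */

	/* Re-arm only if the watchdog was running before. */
	ret = bd70528_wdt_set(mfd, old_state & BD70528_WDT_STATE_BIT, NULL);
out:
	bd70528_wdt_unlock(mfd);
	return ret;
}
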
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index c3924356d173..a22f2d431a35 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -274,6 +274,11 @@ static const struct watchdog_ops cdns_wdt_ops = {
.set_timeout = cdns_wdt_settimeout,
};
+static void cdns_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
/************************Platform Operations*****************************/
/**
* cdns_wdt_probe - Probe call for the device.
@@ -285,13 +290,13 @@ static const struct watchdog_ops cdns_wdt_ops = {
*/
static int cdns_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
int ret, irq;
unsigned long clock_f;
struct cdns_wdt *wdt;
struct watchdog_device *cdns_wdt_device;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -302,19 +307,18 @@ static int cdns_wdt_probe(struct platform_device *pdev)
cdns_wdt_device->min_timeout = CDNS_WDT_MIN_TIMEOUT;
cdns_wdt_device->max_timeout = CDNS_WDT_MAX_TIMEOUT;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->regs = devm_ioremap_resource(&pdev->dev, res);
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->regs))
return PTR_ERR(wdt->regs);
/* Register the interrupt */
- wdt->rst = of_property_read_bool(pdev->dev.of_node, "reset-on-timeout");
+ wdt->rst = of_property_read_bool(dev->of_node, "reset-on-timeout");
irq = platform_get_irq(pdev, 0);
if (!wdt->rst && irq >= 0) {
- ret = devm_request_irq(&pdev->dev, irq, cdns_wdt_irq_handler, 0,
+ ret = devm_request_irq(dev, irq, cdns_wdt_irq_handler, 0,
pdev->name, pdev);
if (ret) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"cannot register interrupt handler err=%d\n",
ret);
return ret;
@@ -322,30 +326,28 @@ static int cdns_wdt_probe(struct platform_device *pdev)
}
/* Initialize the members of cdns_wdt structure */
- cdns_wdt_device->parent = &pdev->dev;
-
- ret = watchdog_init_timeout(cdns_wdt_device, wdt_timeout, &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "unable to set timeout value\n");
- return ret;
- }
+ cdns_wdt_device->parent = dev;
+ watchdog_init_timeout(cdns_wdt_device, wdt_timeout, dev);
watchdog_set_nowayout(cdns_wdt_device, nowayout);
watchdog_stop_on_reboot(cdns_wdt_device);
watchdog_set_drvdata(cdns_wdt_device, wdt);
- wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(&pdev->dev, "input clock not found\n");
- ret = PTR_ERR(wdt->clk);
- return ret;
+ dev_err(dev, "input clock not found\n");
+ return PTR_ERR(wdt->clk);
}
ret = clk_prepare_enable(wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "unable to enable clock\n");
+ dev_err(dev, "unable to enable clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, cdns_clk_disable_unprepare,
+ wdt->clk);
+ if (ret)
+ return ret;
clock_f = clk_get_rate(wdt->clk);
if (clock_f <= CDNS_WDT_CLK_75MHZ) {
@@ -358,56 +360,20 @@ static int cdns_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->io_lock);
- ret = watchdog_register_device(cdns_wdt_device);
+ watchdog_stop_on_reboot(cdns_wdt_device);
+ watchdog_stop_on_unregister(cdns_wdt_device);
+ ret = devm_watchdog_register_device(dev, cdns_wdt_device);
if (ret) {
- dev_err(&pdev->dev, "Failed to register wdt device\n");
- goto err_clk_disable;
+ dev_err(dev, "Failed to register wdt device\n");
+ return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
+ dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
wdt->regs, cdns_wdt_device->timeout,
nowayout ? ", nowayout" : "");
return 0;
-
-err_clk_disable:
- clk_disable_unprepare(wdt->clk);
-
- return ret;
-}
-
-/**
- * cdns_wdt_remove - Probe call for the device.
- *
- * @pdev: handle to the platform device structure.
- * Return: 0 on success, otherwise negative error.
- *
- * Unregister the device after releasing the resources.
- */
-static int cdns_wdt_remove(struct platform_device *pdev)
-{
- struct cdns_wdt *wdt = platform_get_drvdata(pdev);
-
- cdns_wdt_stop(&wdt->cdns_wdt_device);
- watchdog_unregister_device(&wdt->cdns_wdt_device);
- clk_disable_unprepare(wdt->clk);
-
- return 0;
-}
-
-/**
- * cdns_wdt_shutdown - Stop the device.
- *
- * @pdev: handle to the platform structure.
- *
- */
-static void cdns_wdt_shutdown(struct platform_device *pdev)
-{
- struct cdns_wdt *wdt = platform_get_drvdata(pdev);
-
- cdns_wdt_stop(&wdt->cdns_wdt_device);
- clk_disable_unprepare(wdt->clk);
}
/**
@@ -462,8 +428,6 @@ MODULE_DEVICE_TABLE(of, cdns_wdt_of_match);
/* Driver Structure */
static struct platform_driver cdns_wdt_driver = {
.probe = cdns_wdt_probe,
- .remove = cdns_wdt_remove,
- .shutdown = cdns_wdt_shutdown,
.driver = {
.name = "cdns-wdt",
.of_match_table = cdns_wdt_of_match,
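The cdns-wdt hunks above show the shape that most conversions in this series converge on: map the registers with devm_platform_ioremap_resource(), pair clk_prepare_enable() with devm_add_action_or_reset(), and register through devm_watchdog_register_device() with watchdog_stop_on_reboot()/watchdog_stop_on_unregister(), after which the .remove and .shutdown callbacks can be dropped. The following is a minimal sketch of that pattern only; the "foo" names, register offset, and bit layout are hypothetical and not taken from any driver in this diff.

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch only: the fully devm-managed probe shape these conversions
 * converge on.  The "foo" names and the register layout are hypothetical.
 */
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

#define FOO_CTRL	0x0		/* hypothetical control register */
#define FOO_CTRL_EN	BIT(0)

struct foo_wdt {
	struct watchdog_device wdd;
	void __iomem *regs;
	struct clk *clk;
};

static int foo_wdt_start(struct watchdog_device *wdd)
{
	struct foo_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(FOO_CTRL_EN, wdt->regs + FOO_CTRL);
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	struct foo_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(0, wdt->regs + FOO_CTRL);
	return 0;
}

static const struct watchdog_info foo_wdt_info = {
	.identity = "foo watchdog",
	.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop = foo_wdt_stop,
};

static void foo_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_wdt *wdt;
	int ret;

	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;

	/* One call replaces platform_get_resource() + devm_ioremap_resource(). */
	wdt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(wdt->regs))
		return PTR_ERR(wdt->regs);

	wdt->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(wdt->clk))
		return PTR_ERR(wdt->clk);

	ret = clk_prepare_enable(wdt->clk);
	if (ret)
		return ret;

	/* Undo the enable on every later error path and at unbind time. */
	ret = devm_add_action_or_reset(dev, foo_clk_disable_unprepare, wdt->clk);
	if (ret)
		return ret;

	wdt->wdd.parent = dev;
	wdt->wdd.info = &foo_wdt_info;
	wdt->wdd.ops = &foo_wdt_ops;
	watchdog_set_drvdata(&wdt->wdd, wdt);

	/* These make explicit .shutdown and .remove callbacks unnecessary. */
	watchdog_stop_on_reboot(&wdt->wdd);
	watchdog_stop_on_unregister(&wdt->wdd);

	return devm_watchdog_register_device(dev, &wdt->wdd);
}

With every resource owned by devres, probe error handling collapses to plain early returns, which is exactly what the removal of the err_clk_disable labels above reflects.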
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index f29d1edc5bad..260c50b08483 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -6,7 +6,7 @@
* Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/watchdog.h>
@@ -243,27 +243,15 @@ static struct watchdog_device coh901327_wdt = {
.timeout = U300_WDOG_DEFAULT_TIMEOUT,
};
-static int __exit coh901327_remove(struct platform_device *pdev)
-{
- watchdog_unregister_device(&coh901327_wdt);
- coh901327_disable();
- free_irq(irq, pdev);
- clk_disable_unprepare(clk);
- clk_put(clk);
- return 0;
-}
-
static int __init coh901327_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
u16 val;
- struct resource *res;
parent = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virtbase = devm_ioremap_resource(dev, res);
+ virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(virtbase))
return PTR_ERR(virtbase);
@@ -408,19 +396,13 @@ static struct platform_driver coh901327_driver = {
.driver = {
.name = "coh901327_wdog",
.of_match_table = coh901327_dt_match,
+ .suppress_bind_attrs = true,
},
- .remove = __exit_p(coh901327_remove),
.suspend = coh901327_suspend,
.resume = coh901327_resume,
};
+builtin_platform_driver_probe(coh901327_driver, coh901327_probe);
-module_platform_driver_probe(coh901327_driver, coh901327_probe);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("COH 901 327 Watchdog");
-
+/* not really modular, but ... */
module_param(margin, uint, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
-
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:coh901327-watchdog");
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index e263bad99574..a2feef1ff307 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -150,13 +150,13 @@ static const struct watchdog_ops da9052_wdt_ops = {
static int da9052_wdt_probe(struct platform_device *pdev)
{
- struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct da9052 *da9052 = dev_get_drvdata(dev->parent);
struct da9052_wdt_data *driver_data;
struct watchdog_device *da9052_wdt;
int ret;
- driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
- GFP_KERNEL);
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
driver_data->da9052 = da9052;
@@ -166,18 +166,17 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
da9052_wdt->info = &da9052_wdt_info;
da9052_wdt->ops = &da9052_wdt_ops;
- da9052_wdt->parent = &pdev->dev;
+ da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
DA9052_CONTROLD_TWDSCALE, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
- ret);
+ dev_err(dev, "Failed to disable watchdog bits, %d\n", ret);
return ret;
}
- ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt);
+ ret = devm_watchdog_register_device(dev, &driver_data->wdt);
if (ret != 0) {
dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
ret);
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 26a5b2984094..389a4bdd208c 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -119,13 +119,13 @@ static const struct watchdog_ops da9055_wdt_ops = {
static int da9055_wdt_probe(struct platform_device *pdev)
{
- struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct da9055 *da9055 = dev_get_drvdata(dev->parent);
struct da9055_wdt_data *driver_data;
struct watchdog_device *da9055_wdt;
int ret;
- driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
- GFP_KERNEL);
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
@@ -136,17 +136,17 @@ static int da9055_wdt_probe(struct platform_device *pdev)
da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
da9055_wdt->info = &da9055_wdt_info;
da9055_wdt->ops = &da9055_wdt_ops;
- da9055_wdt->parent = &pdev->dev;
+ da9055_wdt->parent = dev;
watchdog_set_nowayout(da9055_wdt, nowayout);
watchdog_set_drvdata(da9055_wdt, driver_data);
ret = da9055_wdt_stop(da9055_wdt);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to stop watchdog, %d\n", ret);
+ dev_err(dev, "Failed to stop watchdog, %d\n", ret);
return ret;
}
- ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt);
+ ret = devm_watchdog_register_device(dev, &driver_data->wdt);
if (ret != 0)
dev_err(da9055->dev, "watchdog_register_device() failed: %d\n",
ret);
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index fe169d8e1fb2..aac749cfaccb 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -46,14 +46,9 @@ static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
static int da9062_reset_watchdog_timer(struct da9062_watchdog *wdt)
{
- int ret;
-
- ret = regmap_update_bits(wdt->hw->regmap,
- DA9062AA_CONTROL_F,
- DA9062AA_WATCHDOG_MASK,
- DA9062AA_WATCHDOG_MASK);
-
- return ret;
+ return regmap_update_bits(wdt->hw->regmap, DA9062AA_CONTROL_F,
+ DA9062AA_WATCHDOG_MASK,
+ DA9062AA_WATCHDOG_MASK);
}
static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
@@ -190,15 +185,16 @@ MODULE_DEVICE_TABLE(of, da9062_compatible_id_table);
static int da9062_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct da9062 *chip;
struct da9062_watchdog *wdt;
- chip = dev_get_drvdata(pdev->dev.parent);
+ chip = dev_get_drvdata(dev->parent);
if (!chip)
return -EINVAL;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -211,13 +207,13 @@ static int da9062_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.min_hw_heartbeat_ms = DA9062_RESET_PROTECTION_MS;
wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
- wdt->wdtdev.parent = &pdev->dev;
+ wdt->wdtdev.parent = dev;
watchdog_set_restart_priority(&wdt->wdtdev, 128);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev);
+ ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
if (ret < 0) {
dev_err(wdt->hw->dev,
"watchdog registration failed (%d)\n", ret);
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 384dca16af8b..3d65e92a4e3f 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -188,17 +188,18 @@ static const struct watchdog_ops da9063_watchdog_ops = {
static int da9063_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct da9063 *da9063;
struct watchdog_device *wdd;
- if (!pdev->dev.parent)
+ if (!dev->parent)
return -EINVAL;
- da9063 = dev_get_drvdata(pdev->dev.parent);
+ da9063 = dev_get_drvdata(dev->parent);
if (!da9063)
return -EINVAL;
- wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
+ wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
if (!wdd)
return -ENOMEM;
@@ -207,22 +208,24 @@ static int da9063_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT;
wdd->max_timeout = DA9063_WDT_MAX_TIMEOUT;
wdd->min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS;
- wdd->timeout = DA9063_WDG_TIMEOUT;
- wdd->parent = &pdev->dev;
-
+ wdd->parent = dev;
wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_restart_priority(wdd, 128);
-
watchdog_set_drvdata(wdd, da9063);
+ /* Set default timeout, maybe override it with DT value, scale it */
+ wdd->timeout = DA9063_WDG_TIMEOUT;
+ watchdog_init_timeout(wdd, 0, dev);
+ da9063_wdt_set_timeout(wdd, wdd->timeout);
+
/* Change the timeout to the default value if the watchdog is running */
if (da9063_wdt_is_running(da9063)) {
- da9063_wdt_update_timeout(da9063, DA9063_WDG_TIMEOUT);
+ da9063_wdt_update_timeout(da9063, wdd->timeout);
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
- return devm_watchdog_register_device(&pdev->dev, wdd);
+ return devm_watchdog_register_device(dev, wdd);
}
static struct platform_driver da9063_wdt_driver = {
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index ebb85d60b6d5..7b2ee35b5ffd 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -191,11 +191,15 @@ static const struct watchdog_ops davinci_wdt_ops = {
.restart = davinci_wdt_restart,
};
+static void davinci_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int davinci_wdt_probe(struct platform_device *pdev)
{
int ret = 0;
struct device *dev = &pdev->dev;
- struct resource *wdt_mem;
struct watchdog_device *wdd;
struct davinci_wdt_device *davinci_wdt;
@@ -207,15 +211,19 @@ static int davinci_wdt_probe(struct platform_device *pdev)
if (IS_ERR(davinci_wdt->clk)) {
if (PTR_ERR(davinci_wdt->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get clock node\n");
+ dev_err(dev, "failed to get clock node\n");
return PTR_ERR(davinci_wdt->clk);
}
ret = clk_prepare_enable(davinci_wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "failed to prepare clock\n");
+ dev_err(dev, "failed to prepare clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, davinci_clk_disable_unprepare,
+ davinci_wdt->clk);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, davinci_wdt);
@@ -225,7 +233,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_timeout = MAX_HEARTBEAT;
wdd->timeout = DEFAULT_HEARTBEAT;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_init_timeout(wdd, heartbeat, dev);
@@ -235,35 +243,17 @@ static int davinci_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdd, 1);
watchdog_set_restart_priority(wdd, 128);
- wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(davinci_wdt->base)) {
- ret = PTR_ERR(davinci_wdt->base);
- goto err_clk_disable;
- }
+ davinci_wdt->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(davinci_wdt->base))
+ return PTR_ERR(davinci_wdt->base);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
dev_err(dev, "cannot register watchdog device\n");
- goto err_clk_disable;
+ return ret;
}
return 0;
-
-err_clk_disable:
- clk_disable_unprepare(davinci_wdt->clk);
-
- return ret;
-}
-
-static int davinci_wdt_remove(struct platform_device *pdev)
-{
- struct davinci_wdt_device *davinci_wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&davinci_wdt->wdd);
- clk_disable_unprepare(davinci_wdt->clk);
-
- return 0;
}
static const struct of_device_id davinci_wdt_of_match[] = {
@@ -278,7 +268,6 @@ static struct platform_driver platform_wdt_driver = {
.of_match_table = davinci_wdt_of_match,
},
.probe = davinci_wdt_probe,
- .remove = davinci_wdt_remove,
};
module_platform_driver(platform_wdt_driver);
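For reference, devm_platform_ioremap_resource() is a thin convenience helper, so the two-line conversions repeated throughout this diff are mechanical. Roughly what it does internally (simplified; the local name below is used here only to keep the sketch self-contained and distinct from the real symbol):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Roughly what devm_platform_ioremap_resource() does internally. */
static void __iomem *
example_platform_ioremap_resource(struct platform_device *pdev,
				  unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

Because devm_ioremap_resource() already prints an error and returns an ERR_PTR on failure, the callers keep only the IS_ERR()/PTR_ERR() check.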
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index a9e11df155b8..8af6e9a67d0d 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -116,7 +116,6 @@ static struct watchdog_device dc_wdt_wdd = {
static int dc_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct dc_wdt *wdt;
int ret;
@@ -125,8 +124,7 @@ static int dc_wdt_probe(struct platform_device *pdev)
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index aa95f57cc1c3..39e43750ab08 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -238,15 +238,13 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct dw_wdt *dw_wdt;
- struct resource *mem;
int ret;
dw_wdt = devm_kzalloc(dev, sizeof(*dw_wdt), GFP_KERNEL);
if (!dw_wdt)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dw_wdt->regs = devm_ioremap_resource(dev, mem);
+ dw_wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dw_wdt->regs))
return PTR_ERR(dw_wdt->regs);
diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c
index 4c4c8ce78021..c176f59fea28 100644
--- a/drivers/watchdog/ebc-c384_wdt.c
+++ b/drivers/watchdog/ebc-c384_wdt.c
@@ -117,10 +117,7 @@ static int ebc_c384_wdt_probe(struct device *dev, unsigned int id)
wdd->max_timeout = WATCHDOG_MAX_TIMEOUT;
watchdog_set_nowayout(wdd, nowayout);
-
- if (watchdog_init_timeout(wdd, timeout, dev))
- dev_warn(dev, "Invalid timeout (%u seconds), using default (%u seconds)\n",
- timeout, WATCHDOG_TIMEOUT);
+ watchdog_init_timeout(wdd, timeout, dev);
return devm_watchdog_register_device(dev, wdd);
}
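Several hunks (ebc-c384, hpwdt, i6300esb, cdns) drop the caller-side warning around watchdog_init_timeout(). The pattern they rely on: the driver presets a default in wdd->timeout, and the helper only overwrites it when the module parameter or the "timeout-sec" device-tree property supplies a valid value, so a rejected value simply leaves the default in place. A small sketch of the call site; the helper name and the placeholder default are assumptions, not code from this series.

#include <linux/watchdog.h>

/* Hypothetical helper: how the converted drivers now handle the timeout. */
static void sketch_setup_timeout(struct watchdog_device *wdd,
				 unsigned int timeout_param, struct device *dev)
{
	/* Preset a sane default first ... */
	wdd->timeout = 30;	/* seconds; placeholder default */

	/*
	 * ... then let the core override it from the module parameter or the
	 * "timeout-sec" DT property.  An invalid or missing value leaves the
	 * preset default in place, so the return value needs no handling here.
	 */
	watchdog_init_timeout(wdd, timeout_param, dev);
}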
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index f9b14e6efd9a..38e26f160b9a 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -89,18 +89,17 @@ static const struct watchdog_ops ep93xx_wdt_ops = {
static int ep93xx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ep93xx_wdt_priv *priv;
struct watchdog_device *wdd;
- struct resource *res;
unsigned long val;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(&pdev->dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio))
return PTR_ERR(priv->mmio);
@@ -112,21 +111,21 @@ static int ep93xx_wdt_probe(struct platform_device *pdev)
wdd->ops = &ep93xx_wdt_ops;
wdd->min_timeout = 1;
wdd->max_hw_heartbeat_ms = 200;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_nowayout(wdd, nowayout);
wdd->timeout = WDT_TIMEOUT;
- watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(wdd, timeout, dev);
watchdog_set_drvdata(wdd, priv);
- ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
- dev_info(&pdev->dev, "EP93XX watchdog driver %s\n",
- (val & 0x08) ? " (nCS1 disable detected)" : "");
+ dev_info(dev, "EP93XX watchdog driver %s\n",
+ (val & 0x08) ? " (nCS1 disable detected)" : "");
return 0;
}
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 021c6ace9462..041172e6c469 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -338,8 +338,11 @@ static int f71862fg_pin_configure(unsigned short ioaddr)
static int watchdog_start(void)
{
+ int err;
+ u8 tmp;
+
/* Make sure we don't die as soon as the watchdog is enabled below */
- int err = watchdog_keepalive();
+ err = watchdog_keepalive();
if (err)
return err;
@@ -386,19 +389,18 @@ static int watchdog_start(void)
break;
case f81866:
- /* Set pin 70 to WDTRST# */
- superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
- BIT(3) | BIT(0));
- superio_set_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
- BIT(2));
/*
* GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
* The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
* BIT5: 0 -> WDTRST#
* 1 -> GPIO15
*/
- superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1,
- BIT(5));
+ tmp = superio_inb(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL);
+ tmp &= ~(BIT(3) | BIT(0));
+ tmp |= BIT(2);
+ superio_outb(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL, tmp);
+
+ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1, 5);
break;
default:
diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c
index a9c2912ee280..9ea0e56fa7ee 100644
--- a/drivers/watchdog/ftwdt010_wdt.c
+++ b/drivers/watchdog/ftwdt010_wdt.c
@@ -124,7 +124,6 @@ static const struct watchdog_info ftwdt010_wdt_info = {
static int ftwdt010_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ftwdt010_wdt *gwdt;
unsigned int reg;
int irq;
@@ -134,8 +133,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev)
if (!gwdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gwdt->base = devm_ioremap_resource(dev, res);
+ gwdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gwdt->base))
return PTR_ERR(gwdt->base);
@@ -171,7 +169,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev)
ret = devm_watchdog_register_device(dev, &gwdt->wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog\n");
+ dev_err(dev, "failed to register watchdog\n");
return ret;
}
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index ea77cae03c9d..bc24674b4d9e 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -154,25 +154,14 @@ static int gpio_wdt_probe(struct platform_device *pdev)
priv->wdd.parent = dev;
priv->wdd.timeout = SOFT_TIMEOUT_DEF;
- watchdog_init_timeout(&priv->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&priv->wdd, 0, dev);
watchdog_stop_on_reboot(&priv->wdd);
if (priv->always_running)
gpio_wdt_start(&priv->wdd);
- ret = watchdog_register_device(&priv->wdd);
-
- return ret;
-}
-
-static int gpio_wdt_remove(struct platform_device *pdev)
-{
- struct gpio_wdt_priv *priv = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&priv->wdd);
-
- return 0;
+ return devm_watchdog_register_device(dev, &priv->wdd);
}
static const struct of_device_id gpio_wdt_dt_ids[] = {
@@ -187,7 +176,6 @@ static struct platform_driver gpio_wdt_driver = {
.of_match_table = gpio_wdt_dt_ids,
},
.probe = gpio_wdt_probe,
- .remove = gpio_wdt_remove,
};
#ifdef CONFIG_GPIO_WATCHDOG_ARCH_INITCALL
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index ef30c7e9728d..db1bf6f546ae 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -311,8 +311,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
goto error_init_nmi_decoding;
watchdog_set_nowayout(&hpwdt_dev, nowayout);
- if (watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL))
- dev_warn(&dev->dev, "Invalid soft_margin: %d.\n", soft_margin);
+ watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL);
if (pretimeout && hpwdt_dev.timeout <= PRETIMEOUT_SEC) {
dev_warn(&dev->dev, "timeout <= pretimeout. Setting pretimeout to zero\n");
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 950c71a8bb22..17941c03996b 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -311,10 +311,7 @@ static int esb_probe(struct pci_dev *pdev,
edev->wdd.min_timeout = ESB_HEARTBEAT_MIN;
edev->wdd.max_timeout = ESB_HEARTBEAT_MAX;
edev->wdd.timeout = ESB_HEARTBEAT_DEFAULT;
- if (watchdog_init_timeout(&edev->wdd, heartbeat, NULL))
- dev_info(&pdev->dev,
- "heartbeat value must be " ESB_HEARTBEAT_RANGE
- ", using %u\n", edev->wdd.timeout);
+ watchdog_init_timeout(&edev->wdd, heartbeat, NULL);
watchdog_set_nowayout(&edev->wdd, nowayout);
watchdog_stop_on_reboot(&edev->wdd);
watchdog_stop_on_unregister(&edev->wdd);
@@ -328,8 +325,8 @@ static int esb_probe(struct pci_dev *pdev,
goto err_unmap;
}
dev_info(&pdev->dev,
- "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
- edev->base, edev->wdd.timeout, nowayout);
+ "initialized. heartbeat=%d sec (nowayout=%d)\n",
+ edev->wdd.timeout, nowayout);
return 0;
err_unmap:
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 0a5318b7865e..89cea6ce9a08 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -545,6 +545,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
}
watchdog_stop_on_reboot(&p->wddev);
+ watchdog_stop_on_unregister(&p->wddev);
ret = devm_watchdog_register_device(dev, &p->wddev);
if (ret != 0) {
pr_err("cannot register watchdog device (err=%d)\n", ret);
@@ -557,17 +558,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int iTCO_wdt_remove(struct platform_device *pdev)
-{
- struct iTCO_wdt_private *p = platform_get_drvdata(pdev);
-
- /* Stop the timer before we leave */
- if (!nowayout)
- iTCO_wdt_stop(&p->wddev);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
/*
* Suspend-to-idle requires this, because it stops the ticks and timekeeping, so
@@ -620,7 +610,6 @@ static const struct dev_pm_ops iTCO_wdt_pm = {
static struct platform_driver iTCO_wdt_driver = {
.probe = iTCO_wdt_probe,
- .remove = iTCO_wdt_remove,
.driver = {
.name = DRV_NAME,
.pm = ITCO_WDT_PM_OPS,
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index a3134ffa59f8..0fc31aadeee3 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -178,59 +178,69 @@ static const struct watchdog_ops pdc_wdt_ops = {
.restart = pdc_wdt_restart,
};
+static void pdc_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pdc_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
u64 div;
int ret, val;
unsigned long clk_rate;
- struct resource *res;
struct pdc_wdt_dev *pdc_wdt;
- pdc_wdt = devm_kzalloc(&pdev->dev, sizeof(*pdc_wdt), GFP_KERNEL);
+ pdc_wdt = devm_kzalloc(dev, sizeof(*pdc_wdt), GFP_KERNEL);
if (!pdc_wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdc_wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ pdc_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdc_wdt->base))
return PTR_ERR(pdc_wdt->base);
- pdc_wdt->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ pdc_wdt->sys_clk = devm_clk_get(dev, "sys");
if (IS_ERR(pdc_wdt->sys_clk)) {
- dev_err(&pdev->dev, "failed to get the sys clock\n");
+ dev_err(dev, "failed to get the sys clock\n");
return PTR_ERR(pdc_wdt->sys_clk);
}
- pdc_wdt->wdt_clk = devm_clk_get(&pdev->dev, "wdt");
+ pdc_wdt->wdt_clk = devm_clk_get(dev, "wdt");
if (IS_ERR(pdc_wdt->wdt_clk)) {
- dev_err(&pdev->dev, "failed to get the wdt clock\n");
+ dev_err(dev, "failed to get the wdt clock\n");
return PTR_ERR(pdc_wdt->wdt_clk);
}
ret = clk_prepare_enable(pdc_wdt->sys_clk);
if (ret) {
- dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
+ dev_err(dev, "could not prepare or enable sys clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, pdc_clk_disable_unprepare,
+ pdc_wdt->sys_clk);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(pdc_wdt->wdt_clk);
if (ret) {
- dev_err(&pdev->dev, "could not prepare or enable wdt clock\n");
- goto disable_sys_clk;
+ dev_err(dev, "could not prepare or enable wdt clock\n");
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, pdc_clk_disable_unprepare,
+ pdc_wdt->wdt_clk);
+ if (ret)
+ return ret;
/* We use the clock rate to calculate the max timeout */
clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
if (clk_rate == 0) {
- dev_err(&pdev->dev, "failed to get clock rate\n");
- ret = -EINVAL;
- goto disable_wdt_clk;
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
}
if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) {
- dev_err(&pdev->dev, "invalid clock rate\n");
- ret = -EINVAL;
- goto disable_wdt_clk;
+ dev_err(dev, "invalid clock rate\n");
+ return -EINVAL;
}
if (order_base_2(clk_rate) == 0)
@@ -245,10 +255,10 @@ static int pdc_wdt_probe(struct platform_device *pdev)
do_div(div, clk_rate);
pdc_wdt->wdt_dev.max_timeout = div;
pdc_wdt->wdt_dev.timeout = PDC_WDT_DEF_TIMEOUT;
- pdc_wdt->wdt_dev.parent = &pdev->dev;
+ pdc_wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
- watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
+ watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, dev);
pdc_wdt_stop(&pdc_wdt->wdt_dev);
@@ -259,24 +269,22 @@ static int pdc_wdt_probe(struct platform_device *pdev)
case PDC_WDT_TICKLE_STATUS_TICKLE:
case PDC_WDT_TICKLE_STATUS_TIMEOUT:
pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET;
- dev_info(&pdev->dev,
- "watchdog module last reset due to timeout\n");
+ dev_info(dev, "watchdog module last reset due to timeout\n");
break;
case PDC_WDT_TICKLE_STATUS_HRESET:
- dev_info(&pdev->dev,
+ dev_info(dev,
"watchdog module last reset due to hard reset\n");
break;
case PDC_WDT_TICKLE_STATUS_SRESET:
- dev_info(&pdev->dev,
+ dev_info(dev,
"watchdog module last reset due to soft reset\n");
break;
case PDC_WDT_TICKLE_STATUS_USER:
- dev_info(&pdev->dev,
+ dev_info(dev,
"watchdog module last reset due to user reset\n");
break;
default:
- dev_info(&pdev->dev,
- "contains an illegal status code (%08x)\n", val);
+ dev_info(dev, "contains an illegal status code (%08x)\n", val);
break;
}
@@ -285,36 +293,9 @@ static int pdc_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdc_wdt);
- ret = watchdog_register_device(&pdc_wdt->wdt_dev);
- if (ret)
- goto disable_wdt_clk;
-
- return 0;
-
-disable_wdt_clk:
- clk_disable_unprepare(pdc_wdt->wdt_clk);
-disable_sys_clk:
- clk_disable_unprepare(pdc_wdt->sys_clk);
- return ret;
-}
-
-static void pdc_wdt_shutdown(struct platform_device *pdev)
-{
- struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
-
- pdc_wdt_stop(&pdc_wdt->wdt_dev);
-}
-
-static int pdc_wdt_remove(struct platform_device *pdev)
-{
- struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
-
- pdc_wdt_stop(&pdc_wdt->wdt_dev);
- watchdog_unregister_device(&pdc_wdt->wdt_dev);
- clk_disable_unprepare(pdc_wdt->wdt_clk);
- clk_disable_unprepare(pdc_wdt->sys_clk);
-
- return 0;
+ watchdog_stop_on_reboot(&pdc_wdt->wdt_dev);
+ watchdog_stop_on_unregister(&pdc_wdt->wdt_dev);
+ return devm_watchdog_register_device(dev, &pdc_wdt->wdt_dev);
}
static const struct of_device_id pdc_wdt_match[] = {
@@ -329,8 +310,6 @@ static struct platform_driver pdc_wdt_driver = {
.of_match_table = pdc_wdt_match,
},
.probe = pdc_wdt_probe,
- .remove = pdc_wdt_remove,
- .shutdown = pdc_wdt_shutdown,
};
module_platform_driver(pdc_wdt_driver);
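The imgpdc conversion above is the two-clock case of the same idea: each clk_prepare_enable() is immediately paired with devm_add_action_or_reset(), so every later "return ret" unwinds correctly and the disable_sys_clk/disable_wdt_clk goto ladder disappears. Because devres actions run in reverse order of registration, the wdt clock is released before the sys clock, matching the old explicit error path. A compact sketch of the pairing, with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>

static void sketch_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

/*
 * Enable a clock and register its teardown with devres in one step, so any
 * later error return in probe (and eventual unbind) disables it automatically.
 */
static int sketch_clk_enable_managed(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, sketch_clk_disable_unprepare, clk);
}

Calling such a helper once per clock keeps probe linear; later kernels added devm_clk_get_enabled() to fold the get and enable steps as well, though that is beyond the scope of this series.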
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 2b52514eaa86..a606005dd65f 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -178,8 +178,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
unsigned int new_timeout)
{
- __imx2_wdt_set_timeout(wdog, new_timeout);
+ unsigned int actual;
+ actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+ __imx2_wdt_set_timeout(wdog, actual);
wdog->timeout = new_timeout;
return 0;
}
@@ -247,7 +249,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
{
struct imx2_wdt_device *wdev;
struct watchdog_device *wdog;
- struct resource *res;
void __iomem *base;
int ret;
u32 val;
@@ -256,8 +257,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (!wdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
new file mode 100644
index 000000000000..49848b66186c
--- /dev/null
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2019 NXP.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_TIMEOUT 60
+/*
+ * The software timer tick is implemented on the SCFW side and in theory
+ * supports 10 ms to 0xffffffff ms, but 1 s to 128 s is enough for normal
+ * use; raise this maximum if it turns out not to be enough.
+ */
+#define MAX_TIMEOUT 128
+
+#define IMX_SIP_TIMER 0xC2000002
+#define IMX_SIP_TIMER_START_WDOG 0x01
+#define IMX_SIP_TIMER_STOP_WDOG 0x02
+#define IMX_SIP_TIMER_SET_WDOG_ACT 0x03
+#define IMX_SIP_TIMER_PING_WDOG 0x04
+#define IMX_SIP_TIMER_SET_TIMEOUT_WDOG 0x05
+#define IMX_SIP_TIMER_GET_WDOG_STAT 0x06
+#define IMX_SIP_TIMER_SET_PRETIME_WDOG 0x07
+
+#define SC_TIMER_WDOG_ACTION_PARTITION 0
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0000);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int imx_sc_wdt_ping(struct watchdog_device *wdog)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_PING_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ return 0;
+}
+
+static int imx_sc_wdt_start(struct watchdog_device *wdog)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_START_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return -EACCES;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_WDOG_ACT,
+ SC_TIMER_WDOG_ACTION_PARTITION,
+ 0, 0, 0, 0, 0, &res);
+ return res.a0 ? -EACCES : 0;
+}
+
+static int imx_sc_wdt_stop(struct watchdog_device *wdog)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_STOP_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0 ? -EACCES : 0;
+}
+
+static int imx_sc_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
+{
+ struct arm_smccc_res res;
+
+ wdog->timeout = timeout;
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_TIMEOUT_WDOG,
+ timeout * 1000, 0, 0, 0, 0, 0, &res);
+
+ return res.a0 ? -EACCES : 0;
+}
+
+static const struct watchdog_ops imx_sc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = imx_sc_wdt_start,
+ .stop = imx_sc_wdt_stop,
+ .ping = imx_sc_wdt_ping,
+ .set_timeout = imx_sc_wdt_set_timeout,
+};
+
+static const struct watchdog_info imx_sc_wdt_info = {
+ .identity = "i.MX SC watchdog timer",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE | WDIOF_PRETIMEOUT,
+};
+
+static int imx_sc_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *imx_sc_wdd;
+ int ret;
+
+ imx_sc_wdd = devm_kzalloc(dev, sizeof(*imx_sc_wdd), GFP_KERNEL);
+ if (!imx_sc_wdd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, imx_sc_wdd);
+
+ imx_sc_wdd->info = &imx_sc_wdt_info;
+ imx_sc_wdd->ops = &imx_sc_wdt_ops;
+ imx_sc_wdd->min_timeout = 1;
+ imx_sc_wdd->max_timeout = MAX_TIMEOUT;
+ imx_sc_wdd->parent = dev;
+ imx_sc_wdd->timeout = DEFAULT_TIMEOUT;
+
+ watchdog_init_timeout(imx_sc_wdd, 0, dev);
+ watchdog_stop_on_reboot(imx_sc_wdd);
+ watchdog_stop_on_unregister(imx_sc_wdd);
+
+ ret = devm_watchdog_register_device(dev, imx_sc_wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
+{
+ struct watchdog_device *imx_sc_wdd = dev_get_drvdata(dev);
+
+ if (watchdog_active(imx_sc_wdd))
+ imx_sc_wdt_stop(imx_sc_wdd);
+
+ return 0;
+}
+
+static int __maybe_unused imx_sc_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *imx_sc_wdd = dev_get_drvdata(dev);
+
+ if (watchdog_active(imx_sc_wdd))
+ imx_sc_wdt_start(imx_sc_wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops,
+ imx_sc_wdt_suspend, imx_sc_wdt_resume);
+
+static const struct of_device_id imx_sc_wdt_dt_ids[] = {
+ { .compatible = "fsl,imx-sc-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_sc_wdt_dt_ids);
+
+static struct platform_driver imx_sc_wdt_driver = {
+ .probe = imx_sc_wdt_probe,
+ .driver = {
+ .name = "imx-sc-wdt",
+ .of_match_table = imx_sc_wdt_dt_ids,
+ .pm = &imx_sc_wdt_pm_ops,
+ },
+};
+module_platform_driver(imx_sc_wdt_driver);
+
+MODULE_AUTHOR("Robin Gong <yibin.gong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX system controller watchdog driver");
+MODULE_LICENSE("GPL v2");
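The new imx_sc_wdt driver issues the same arm_smccc_smc() call shape repeatedly, with only the sub-function and argument changing and res.a0 mapped to -EACCES. Purely as an illustration of that calling convention, a hypothetical wrapper (not part of the driver) could centralize the status check, reusing the IMX_SIP_* constants defined in the file above:

#include <linux/arm-smccc.h>
#include <linux/errno.h>

/*
 * Hypothetical wrapper: issue one IMX_SIP_TIMER sub-call and map the
 * firmware status in res.a0 to an errno, as the driver above does inline.
 */
static int imx_sc_wdt_call(unsigned long func, unsigned long arg)
{
	struct arm_smccc_res res;

	arm_smccc_smc(IMX_SIP_TIMER, func, arg, 0, 0, 0, 0, 0, &res);

	return res.a0 ? -EACCES : 0;
}

With it, the ping and timeout paths would reduce to imx_sc_wdt_call(IMX_SIP_TIMER_PING_WDOG, 0) and imx_sc_wdt_call(IMX_SIP_TIMER_SET_TIMEOUT_WDOG, timeout * 1000); the driver as merged keeps the calls open-coded.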
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 72c108a12c19..6cf7cc1ff615 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -110,12 +110,13 @@ static const struct watchdog_ops mid_wdt_ops = {
static int mid_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdt_dev;
- struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+ struct intel_mid_wdt_pdata *pdata = dev->platform_data;
int ret;
if (!pdata) {
- dev_err(&pdev->dev, "missing platform data\n");
+ dev_err(dev, "missing platform data\n");
return -EINVAL;
}
@@ -125,7 +126,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
return ret;
}
- wdt_dev = devm_kzalloc(&pdev->dev, sizeof(*wdt_dev), GFP_KERNEL);
+ wdt_dev = devm_kzalloc(dev, sizeof(*wdt_dev), GFP_KERNEL);
if (!wdt_dev)
return -ENOMEM;
@@ -134,16 +135,15 @@ static int mid_wdt_probe(struct platform_device *pdev)
wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
- wdt_dev->parent = &pdev->dev;
+ wdt_dev->parent = dev;
- watchdog_set_drvdata(wdt_dev, &pdev->dev);
+ watchdog_set_drvdata(wdt_dev, dev);
- ret = devm_request_irq(&pdev->dev, pdata->irq, mid_wdt_irq,
+ ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
wdt_dev);
if (ret) {
- dev_err(&pdev->dev, "error requesting warning irq %d\n",
- pdata->irq);
+ dev_err(dev, "error requesting warning irq %d\n", pdata->irq);
return ret;
}
@@ -163,13 +163,13 @@ static int mid_wdt_probe(struct platform_device *pdev)
/* Make sure the watchdog is serviced */
set_bit(WDOG_HW_RUNNING, &wdt_dev->status);
- ret = devm_watchdog_register_device(&pdev->dev, wdt_dev);
+ ret = devm_watchdog_register_device(dev, wdt_dev);
if (ret) {
- dev_err(&pdev->dev, "error registering watchdog device\n");
+ dev_err(dev, "error registering watchdog device\n");
return ret;
}
- dev_info(&pdev->dev, "Intel MID watchdog device probed\n");
+ dev_info(dev, "Intel MID watchdog device probed\n");
return 0;
}
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 3181a72c7ddf..f7baf75d38c0 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -25,7 +25,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/compiler.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -224,7 +223,7 @@ static int intel_scu_set_heartbeat(u32 t)
watchdog_device.timer_tbl_ptr->freq_hz);
pr_debug("set_heartbeat: timer_set is %x (hex)\n",
watchdog_device.timer_set);
- pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
+ pr_debug("set_heartbeat: timer_margin is %x (hex)\n", timer_margin);
pr_debug("set_heartbeat: threshold is %x (hex)\n",
watchdog_device.threshold);
pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
@@ -545,21 +544,4 @@ register_reboot_error:
iounmap(watchdog_device.timer_load_count_addr);
return ret;
}
-
-static void __exit intel_scu_watchdog_exit(void)
-{
-
- misc_deregister(&watchdog_device.miscdev);
- unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
- /* disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
- iounmap(watchdog_device.timer_load_count_addr);
-}
-
late_initcall(intel_scu_watchdog_init);
-module_exit(intel_scu_watchdog_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index dd139cda936c..9067998759e3 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/of.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -176,6 +177,14 @@ static int __init ixp4xx_wdt_init(void)
{
int ret;
+ /*
+ * FIXME: we bail out on device tree boot but this really needs
+ * to be fixed in a nicer way: this registers the MDIO bus before
+ * even matching the driver infrastructure, we should only probe
+ * detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
pr_err("Rev. A0 IXP42x CPU detected - watchdog disabled\n");
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index ec4d99a830ba..d1bc7cbd4f2b 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -163,12 +163,12 @@ MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches);
static int jz4740_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct jz4740_wdt_drvdata *drvdata;
struct watchdog_device *jz4740_wdt;
- struct resource *res;
int ret;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(struct jz4740_wdt_drvdata),
+ drvdata = devm_kzalloc(dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -182,27 +182,24 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
jz4740_wdt->timeout = heartbeat;
jz4740_wdt->min_timeout = 1;
jz4740_wdt->max_timeout = MAX_HEARTBEAT;
- jz4740_wdt->parent = &pdev->dev;
+ jz4740_wdt->parent = dev;
watchdog_set_nowayout(jz4740_wdt, nowayout);
watchdog_set_drvdata(jz4740_wdt, drvdata);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata->rtc_clk = devm_clk_get(&pdev->dev, "rtc");
+ drvdata->rtc_clk = devm_clk_get(dev, "rtc");
if (IS_ERR(drvdata->rtc_clk)) {
- dev_err(&pdev->dev, "cannot find RTC clock\n");
+ dev_err(dev, "cannot find RTC clock\n");
return PTR_ERR(drvdata->rtc_clk);
}
- ret = devm_watchdog_register_device(&pdev->dev, &drvdata->wdt);
+ ret = devm_watchdog_register_device(dev, &drvdata->wdt);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, drvdata);
-
return 0;
}
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index e268add43010..543eb0f27a42 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -467,7 +467,7 @@ static int kempld_wdt_probe(struct platform_device *pdev)
KEMPLD_WDT_CFG_GLOBAL_LOCK)) {
if (!nowayout)
dev_warn(dev,
- "Forcing nowayout - watchdog lock enabled!\n");
+ "Forcing nowayout - watchdog lock enabled!\n");
nowayout = true;
}
@@ -492,7 +492,9 @@ static int kempld_wdt_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, wdt_data);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
@@ -501,26 +503,6 @@ static int kempld_wdt_probe(struct platform_device *pdev)
return 0;
}
-static void kempld_wdt_shutdown(struct platform_device *pdev)
-{
- struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
-
- kempld_wdt_stop(&wdt_data->wdd);
-}
-
-static int kempld_wdt_remove(struct platform_device *pdev)
-{
- struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
- struct watchdog_device *wdd = &wdt_data->wdd;
- int ret = 0;
-
- if (!nowayout)
- ret = kempld_wdt_stop(wdd);
- watchdog_unregister_device(wdd);
-
- return ret;
-}
-
#ifdef CONFIG_PM
/* Disable watchdog if it is active during suspend */
static int kempld_wdt_suspend(struct platform_device *pdev,
@@ -567,8 +549,6 @@ static struct platform_driver kempld_wdt_driver = {
.name = "kempld-wdt",
},
.probe = kempld_wdt_probe,
- .remove = kempld_wdt_remove,
- .shutdown = kempld_wdt_shutdown,
.suspend = kempld_wdt_suspend,
.resume = kempld_wdt_resume,
};
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 83da84d6074b..4caf02ba5d49 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -203,7 +203,6 @@ static int ltq_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ltq_wdt_priv *priv;
struct watchdog_device *wdt;
- struct resource *res;
struct clk *clk;
const struct ltq_wdt_hw *ltq_wdt_hw;
int ret;
@@ -213,8 +212,7 @@ static int ltq_wdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, res);
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 3aee50c64a36..d8075e2affa7 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -83,38 +83,44 @@ static const struct watchdog_ops ls1x_wdt_ops = {
.set_timeout = ls1x_wdt_set_timeout,
};
+static void ls1x_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int ls1x_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ls1x_wdt_drvdata *drvdata;
struct watchdog_device *ls1x_wdt;
unsigned long clk_rate;
- struct resource *res;
int err;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata->clk = devm_clk_get(&pdev->dev, pdev->name);
+ drvdata->clk = devm_clk_get(dev, pdev->name);
if (IS_ERR(drvdata->clk))
return PTR_ERR(drvdata->clk);
err = clk_prepare_enable(drvdata->clk);
if (err) {
- dev_err(&pdev->dev, "clk enable failed\n");
+ dev_err(dev, "clk enable failed\n");
return err;
}
+ err = devm_add_action_or_reset(dev, ls1x_clk_disable_unprepare,
+ drvdata->clk);
+ if (err)
+ return err;
clk_rate = clk_get_rate(drvdata->clk);
- if (!clk_rate) {
- err = -EINVAL;
- goto err0;
- }
+ if (!clk_rate)
+ return -EINVAL;
drvdata->clk_rate = clk_rate;
ls1x_wdt = &drvdata->wdt;
@@ -123,41 +129,27 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
ls1x_wdt->timeout = DEFAULT_HEARTBEAT;
ls1x_wdt->min_timeout = 1;
ls1x_wdt->max_hw_heartbeat_ms = U32_MAX / clk_rate * 1000;
- ls1x_wdt->parent = &pdev->dev;
+ ls1x_wdt->parent = dev;
- watchdog_init_timeout(ls1x_wdt, heartbeat, &pdev->dev);
+ watchdog_init_timeout(ls1x_wdt, heartbeat, dev);
watchdog_set_nowayout(ls1x_wdt, nowayout);
watchdog_set_drvdata(ls1x_wdt, drvdata);
- err = watchdog_register_device(&drvdata->wdt);
+ err = devm_watchdog_register_device(dev, &drvdata->wdt);
if (err) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
- goto err0;
+ dev_err(dev, "failed to register watchdog device\n");
+ return err;
}
platform_set_drvdata(pdev, drvdata);
- dev_info(&pdev->dev, "Loongson1 Watchdog driver registered\n");
-
- return 0;
-err0:
- clk_disable_unprepare(drvdata->clk);
- return err;
-}
-
-static int ls1x_wdt_remove(struct platform_device *pdev)
-{
- struct ls1x_wdt_drvdata *drvdata = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&drvdata->wdt);
- clk_disable_unprepare(drvdata->clk);
+ dev_info(dev, "Loongson1 Watchdog driver registered\n");
return 0;
}
static struct platform_driver ls1x_wdt_driver = {
.probe = ls1x_wdt_probe,
- .remove = ls1x_wdt_remove,
.driver = {
.name = "ls1x-wdt",
},
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 331cadb459ac..0e82abd71d35 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -200,19 +200,22 @@ static const struct watchdog_ops lpc18xx_wdt_ops = {
.restart = lpc18xx_wdt_restart,
};
+static void lpc18xx_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int lpc18xx_wdt_probe(struct platform_device *pdev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
if (!lpc18xx_wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc18xx_wdt->base = devm_ioremap_resource(dev, res);
+ lpc18xx_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_wdt->base))
return PTR_ERR(lpc18xx_wdt->base);
@@ -233,19 +236,26 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
dev_err(dev, "could not prepare or enable sys clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, lpc18xx_clk_disable_unprepare,
+ lpc18xx_wdt->reg_clk);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(lpc18xx_wdt->wdt_clk);
if (ret) {
dev_err(dev, "could not prepare or enable wdt clock\n");
- goto disable_reg_clk;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, lpc18xx_clk_disable_unprepare,
+ lpc18xx_wdt->wdt_clk);
+ if (ret)
+ return ret;
/* We use the clock rate to calculate timeouts */
lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
if (lpc18xx_wdt->clk_rate == 0) {
dev_err(dev, "failed to get clock rate\n");
- ret = -EINVAL;
- goto disable_wdt_clk;
+ return -EINVAL;
}
lpc18xx_wdt->wdt_dev.info = &lpc18xx_wdt_info;
@@ -276,24 +286,8 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lpc18xx_wdt);
- ret = watchdog_register_device(&lpc18xx_wdt->wdt_dev);
- if (ret)
- goto disable_wdt_clk;
-
- return 0;
-
-disable_wdt_clk:
- clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
-disable_reg_clk:
- clk_disable_unprepare(lpc18xx_wdt->reg_clk);
- return ret;
-}
-
-static void lpc18xx_wdt_shutdown(struct platform_device *pdev)
-{
- struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
-
- lpc18xx_wdt_stop(&lpc18xx_wdt->wdt_dev);
+ watchdog_stop_on_reboot(&lpc18xx_wdt->wdt_dev);
+ return devm_watchdog_register_device(dev, &lpc18xx_wdt->wdt_dev);
}
static int lpc18xx_wdt_remove(struct platform_device *pdev)
@@ -303,10 +297,6 @@ static int lpc18xx_wdt_remove(struct platform_device *pdev)
dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
del_timer(&lpc18xx_wdt->timer);
- watchdog_unregister_device(&lpc18xx_wdt->wdt_dev);
- clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
- clk_disable_unprepare(lpc18xx_wdt->reg_clk);
-
return 0;
}
@@ -323,7 +313,6 @@ static struct platform_driver lpc18xx_wdt_driver = {
},
.probe = lpc18xx_wdt_probe,
.remove = lpc18xx_wdt_remove,
- .shutdown = lpc18xx_wdt_shutdown,
};
module_platform_driver(lpc18xx_wdt_driver);
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 53759415cf06..c0c9e948adbc 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -177,6 +177,7 @@ static inline void zf_set_timer(unsigned short new, unsigned char n)
switch (n) {
case WD1:
zf_writew(COUNTER_1, new);
+ /* fall through */
case WD2:
zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
default:
@@ -318,7 +319,7 @@ static long zf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_KEEPALIVE:
- zf_ping(0);
+ zf_ping(NULL);
break;
default:
return -ENOTTY;
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index bf6a068245ba..3a899628a834 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -187,9 +187,7 @@ static void max63xx_mmap_set(struct max63xx_wdt *wdt, u8 set)
static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
{
- struct resource *mem = platform_get_resource(p, IORESOURCE_MEM, 0);
-
- wdt->base = devm_ioremap_resource(&p->dev, mem);
+ wdt->base = devm_platform_ioremap_resource(p, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -202,11 +200,12 @@ static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
static int max63xx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct max63xx_wdt *wdt;
struct max63xx_timeout *table;
int err;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -217,7 +216,7 @@ static int max63xx_wdt_probe(struct platform_device *pdev)
wdt->timeout = max63xx_select_timeout(table, heartbeat);
if (!wdt->timeout) {
- dev_err(&pdev->dev, "unable to satisfy %ds heartbeat request\n",
+ dev_err(dev, "unable to satisfy %ds heartbeat request\n",
heartbeat);
return -EINVAL;
}
@@ -229,30 +228,22 @@ static int max63xx_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, wdt);
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->wdd.timeout = wdt->timeout->twd;
wdt->wdd.info = &max63xx_wdt_info;
wdt->wdd.ops = &max63xx_wdt_ops;
watchdog_set_nowayout(&wdt->wdd, nowayout);
- err = watchdog_register_device(&wdt->wdd);
+ err = devm_watchdog_register_device(dev, &wdt->wdd);
if (err)
return err;
- dev_info(&pdev->dev, "using %ds heartbeat with %ds initial delay\n",
+ dev_info(dev, "using %ds heartbeat with %ds initial delay\n",
wdt->timeout->twd, wdt->timeout->tdelay);
return 0;
}
-static int max63xx_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(wdd);
- return 0;
-}
-
static const struct platform_device_id max63xx_id_table[] = {
{ "max6369_wdt", (kernel_ulong_t)max6369_table, },
{ "max6370_wdt", (kernel_ulong_t)max6369_table, },
@@ -266,7 +257,6 @@ MODULE_DEVICE_TABLE(platform, max63xx_id_table);
static struct platform_driver max63xx_wdt_driver = {
.probe = max63xx_wdt_probe,
- .remove = max63xx_wdt_remove,
.id_table = max63xx_id_table,
.driver = {
.name = "max63xx_wdt",
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 70c9cd3ba938..3ca6b9337932 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -112,17 +112,18 @@ static const struct watchdog_ops max77620_wdt_ops = {
static int max77620_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct max77620_wdt *wdt;
struct watchdog_device *wdt_dev;
unsigned int regval;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->dev = &pdev->dev;
- wdt->rmap = dev_get_regmap(pdev->dev.parent, NULL);
+ wdt->dev = dev;
+ wdt->rmap = dev_get_regmap(dev->parent, NULL);
if (!wdt->rmap) {
dev_err(wdt->dev, "Failed to get parent regmap\n");
return -ENODEV;
@@ -183,25 +184,16 @@ static int max77620_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdt_dev, nowayout);
watchdog_set_drvdata(wdt_dev, wdt);
- ret = watchdog_register_device(wdt_dev);
+ watchdog_stop_on_unregister(wdt_dev);
+ ret = devm_watchdog_register_device(dev, wdt_dev);
if (ret < 0) {
- dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
+ dev_err(dev, "watchdog registration failed: %d\n", ret);
return ret;
}
return 0;
}
-static int max77620_wdt_remove(struct platform_device *pdev)
-{
- struct max77620_wdt *wdt = platform_get_drvdata(pdev);
-
- max77620_wdt_stop(&wdt->wdt_dev);
- watchdog_unregister_device(&wdt->wdt_dev);
-
- return 0;
-}
-
static const struct platform_device_id max77620_wdt_devtype[] = {
{ .name = "max77620-watchdog", },
{ },
@@ -213,7 +205,6 @@ static struct platform_driver max77620_wdt_driver = {
.name = "max77620-watchdog",
},
.probe = max77620_wdt_probe,
- .remove = max77620_wdt_remove,
.id_table = max77620_wdt_devtype,
};
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 6db69883ece6..e9ca4e0e25dc 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -127,19 +127,20 @@ static struct watchdog_device a21_wdt = {
static int a21_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct a21_wdt_drv *drv;
unsigned int reset = 0;
int num_gpios;
int ret;
int i;
- drv = devm_kzalloc(&pdev->dev, sizeof(struct a21_wdt_drv), GFP_KERNEL);
+ drv = devm_kzalloc(dev, sizeof(struct a21_wdt_drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
- num_gpios = gpiod_count(&pdev->dev, NULL);
+ num_gpios = gpiod_count(dev, NULL);
if (num_gpios != NUM_GPIOS) {
- dev_err(&pdev->dev, "gpios DT property wrong, got %d want %d",
+ dev_err(dev, "gpios DT property wrong, got %d want %d",
num_gpios, NUM_GPIOS);
return -ENODEV;
}
@@ -152,12 +153,9 @@ static int a21_wdt_probe(struct platform_device *pdev)
gflags = GPIOD_ASIS;
else
gflags = GPIOD_IN;
- drv->gpios[i] = devm_gpiod_get_index(&pdev->dev, NULL, i,
- gflags);
- if (IS_ERR(drv->gpios[i])) {
- ret = PTR_ERR(drv->gpios[i]);
- return ret;
- }
+ drv->gpios[i] = devm_gpiod_get_index(dev, NULL, i, gflags);
+ if (IS_ERR(drv->gpios[i]))
+ return PTR_ERR(drv->gpios[i]);
gpiod_set_consumer_name(drv->gpios[i], "MEN A21 Watchdog");
@@ -173,10 +171,10 @@ static int a21_wdt_probe(struct platform_device *pdev)
}
}
- watchdog_init_timeout(&a21_wdt, 30, &pdev->dev);
+ watchdog_init_timeout(&a21_wdt, 30, dev);
watchdog_set_nowayout(&a21_wdt, nowayout);
watchdog_set_drvdata(&a21_wdt, drv);
- a21_wdt.parent = &pdev->dev;
+ a21_wdt.parent = dev;
reset = a21_wdt_get_bootstatus(drv);
if (reset == 2)
@@ -189,15 +187,15 @@ static int a21_wdt_probe(struct platform_device *pdev)
a21_wdt.bootstatus |= WDIOF_EXTERN2;
drv->wdt = a21_wdt;
- dev_set_drvdata(&pdev->dev, drv);
+ dev_set_drvdata(dev, drv);
- ret = devm_watchdog_register_device(&pdev->dev, &a21_wdt);
+ ret = devm_watchdog_register_device(dev, &a21_wdt);
if (ret) {
- dev_err(&pdev->dev, "Cannot register watchdog device\n");
+ dev_err(dev, "Cannot register watchdog device\n");
return ret;
}
- dev_info(&pdev->dev, "MEN A21 watchdog timer driver enabled\n");
+ dev_info(dev, "MEN A21 watchdog timer driver enabled\n");
return 0;
}
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
index 3aefddebb386..b1dbff553cdc 100644
--- a/drivers/watchdog/menf21bmc_wdt.c
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -117,12 +117,12 @@ static const struct watchdog_ops menf21bmc_wdt_ops = {
static int menf21bmc_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret, bmc_timeout;
struct menf21bmc_wdt *drv_data;
- struct i2c_client *i2c_client = to_i2c_client(pdev->dev.parent);
+ struct i2c_client *i2c_client = to_i2c_client(dev->parent);
- drv_data = devm_kzalloc(&pdev->dev,
- sizeof(struct menf21bmc_wdt), GFP_KERNEL);
+ drv_data = devm_kzalloc(dev, sizeof(struct menf21bmc_wdt), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
@@ -130,7 +130,7 @@ static int menf21bmc_wdt_probe(struct platform_device *pdev)
drv_data->wdt.info = &menf21bmc_wdt_info;
drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN;
drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX;
- drv_data->wdt.parent = &pdev->dev;
+ drv_data->wdt.parent = dev;
drv_data->i2c_client = i2c_client;
/*
@@ -140,40 +140,28 @@ static int menf21bmc_wdt_probe(struct platform_device *pdev)
bmc_timeout = i2c_smbus_read_word_data(drv_data->i2c_client,
BMC_CMD_WD_TIME);
if (bmc_timeout < 0) {
- dev_err(&pdev->dev, "failed to get current WDT timeout\n");
+ dev_err(dev, "failed to get current WDT timeout\n");
return bmc_timeout;
}
- watchdog_init_timeout(&drv_data->wdt, bmc_timeout / 10, &pdev->dev);
+ watchdog_init_timeout(&drv_data->wdt, bmc_timeout / 10, dev);
watchdog_set_nowayout(&drv_data->wdt, nowayout);
watchdog_set_drvdata(&drv_data->wdt, drv_data);
platform_set_drvdata(pdev, drv_data);
ret = menf21bmc_wdt_set_bootstatus(drv_data);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to set Watchdog bootstatus\n");
+ dev_err(dev, "failed to set Watchdog bootstatus\n");
return ret;
}
- ret = watchdog_register_device(&drv_data->wdt);
+ ret = devm_watchdog_register_device(dev, &drv_data->wdt);
if (ret) {
- dev_err(&pdev->dev, "failed to register Watchdog device\n");
+ dev_err(dev, "failed to register Watchdog device\n");
return ret;
}
- dev_info(&pdev->dev, "MEN 14F021P00 BMC Watchdog device enabled\n");
-
- return 0;
-}
-
-static int menf21bmc_wdt_remove(struct platform_device *pdev)
-{
- struct menf21bmc_wdt *drv_data = platform_get_drvdata(pdev);
-
- dev_warn(&pdev->dev,
- "Unregister MEN 14F021P00 BMC Watchdog device, board may reset\n");
-
- watchdog_unregister_device(&drv_data->wdt);
+ dev_info(dev, "MEN 14F021P00 BMC Watchdog device enabled\n");
return 0;
}
@@ -191,7 +179,6 @@ static struct platform_driver menf21bmc_wdt = {
.name = DEVNAME,
},
.probe = menf21bmc_wdt_probe,
- .remove = menf21bmc_wdt_remove,
.shutdown = menf21bmc_wdt_shutdown,
};
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 69adeab3fde7..d17c1a6ed723 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -136,32 +136,40 @@ static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
+static void meson_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int meson_gxbb_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
- struct resource *res;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ data->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->reg_base))
return PTR_ERR(data->reg_base);
- data->clk = devm_clk_get(&pdev->dev, NULL);
+ data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, meson_clk_disable_unprepare,
+ data->clk);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, data);
- data->wdt_dev.parent = &pdev->dev;
+ data->wdt_dev.parent = dev;
data->wdt_dev.info = &meson_gxbb_wdt_info;
data->wdt_dev.ops = &meson_gxbb_wdt_ops;
data->wdt_dev.max_hw_heartbeat_ms = GXBB_WDT_TCNT_SETUP_MASK;
@@ -178,37 +186,12 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
- ret = watchdog_register_device(&data->wdt_dev);
- if (ret) {
- clk_disable_unprepare(data->clk);
- return ret;
- }
-
- return 0;
-}
-
-static int meson_gxbb_wdt_remove(struct platform_device *pdev)
-{
- struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&data->wdt_dev);
-
- clk_disable_unprepare(data->clk);
-
- return 0;
-}
-
-static void meson_gxbb_wdt_shutdown(struct platform_device *pdev)
-{
- struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
-
- meson_gxbb_wdt_stop(&data->wdt_dev);
+ watchdog_stop_on_reboot(&data->wdt_dev);
+ return devm_watchdog_register_device(dev, &data->wdt_dev);
}
static struct platform_driver meson_gxbb_wdt_driver = {
.probe = meson_gxbb_wdt_probe,
- .remove = meson_gxbb_wdt_remove,
- .shutdown = meson_gxbb_wdt_shutdown,
.driver = {
.name = "meson-gxbb-wdt",
.pm = &meson_gxbb_wdt_pm_ops,
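
Several of the conversions in this series (meson_gxbb here, and of_xilinx, pic32, pnx4008, qcom, rtd119x below) replace manual clk_disable_unprepare() calls in error paths and in .remove with a devm action. A hedged sketch of that idiom for a single clock follows; the foo_* names are illustrative, not symbols from the patched drivers.

#include <linux/clk.h>
#include <linux/device.h>

static void foo_clk_disable_unprepare(void *data)
{
	/* devm action callback: undoes clk_prepare_enable() on unbind */
	clk_disable_unprepare(data);
}

static int foo_enable_clock(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * From here on the clock is released automatically, both when a
	 * later probe step fails and when the device is unbound, so the
	 * error labels and the clock handling in .remove disappear.
	 */
	return devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk);
}

devm_add_action_or_reset() also runs the action immediately if it cannot record it, so the remaining failure path needs no manual clk_disable_unprepare() either.
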
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index cd0275a6cdac..01889cef81e1 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -164,28 +164,27 @@ MODULE_DEVICE_TABLE(of, meson_wdt_dt_ids);
static int meson_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
struct meson_wdt_dev *meson_wdt;
const struct of_device_id *of_id;
int err;
- meson_wdt = devm_kzalloc(&pdev->dev, sizeof(*meson_wdt), GFP_KERNEL);
+ meson_wdt = devm_kzalloc(dev, sizeof(*meson_wdt), GFP_KERNEL);
if (!meson_wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ meson_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_wdt->wdt_base))
return PTR_ERR(meson_wdt->wdt_base);
- of_id = of_match_device(meson_wdt_dt_ids, &pdev->dev);
+ of_id = of_match_device(meson_wdt_dt_ids, dev);
if (!of_id) {
- dev_err(&pdev->dev, "Unable to initialize WDT data\n");
+ dev_err(dev, "Unable to initialize WDT data\n");
return -ENODEV;
}
meson_wdt->data = of_id->data;
- meson_wdt->wdt_dev.parent = &pdev->dev;
+ meson_wdt->wdt_dev.parent = dev;
meson_wdt->wdt_dev.info = &meson_wdt_info;
meson_wdt->wdt_dev.ops = &meson_wdt_ops;
meson_wdt->wdt_dev.max_timeout =
@@ -197,18 +196,18 @@ static int meson_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&meson_wdt->wdt_dev, meson_wdt);
- watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&meson_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&meson_wdt->wdt_dev, 128);
meson_wdt_stop(&meson_wdt->wdt_dev);
watchdog_stop_on_reboot(&meson_wdt->wdt_dev);
- err = devm_watchdog_register_device(&pdev->dev, &meson_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &meson_wdt->wdt_dev);
if (err)
return err;
- dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
meson_wdt->wdt_dev.timeout, nowayout);
return 0;
diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c
index 70c2cbf9c993..03b9ac4b99af 100644
--- a/drivers/watchdog/mlx_wdt.c
+++ b/drivers/watchdog/mlx_wdt.c
@@ -233,20 +233,21 @@ static int mlxreg_wdt_init_timeout(struct mlxreg_wdt *wdt,
static int mlxreg_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct mlxreg_core_platform_data *pdata;
struct mlxreg_wdt *wdt;
int rc;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "Failed to get platform data.\n");
+ dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->regmap = pdata->regmap;
mlxreg_wdt_config(wdt, pdata);
@@ -266,12 +267,11 @@ static int mlxreg_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
mlxreg_wdt_check_card_reset(wdt);
- rc = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+ rc = devm_watchdog_register_device(dev, &wdt->wdd);
register_error:
if (rc)
- dev_err(&pdev->dev,
- "Cannot register watchdog device (err=%d)\n", rc);
+ dev_err(dev, "Cannot register watchdog device (err=%d)\n", rc);
return rc;
}
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 430c3ab84c07..6340a1f5f471 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -91,8 +91,6 @@ static int moxart_wdt_probe(struct platform_device *pdev)
{
struct moxart_wdt_dev *moxart_wdt;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct resource *res;
struct clk *clk;
int err;
unsigned int max_timeout;
@@ -104,12 +102,11 @@ static int moxart_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, moxart_wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- moxart_wdt->base = devm_ioremap_resource(dev, res);
+ moxart_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(moxart_wdt->base))
return PTR_ERR(moxart_wdt->base);
- clk = of_clk_get(node, 0);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", __func__);
return PTR_ERR(clk);
@@ -136,7 +133,8 @@ static int moxart_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt);
- err = watchdog_register_device(&moxart_wdt->dev);
+ watchdog_stop_on_unregister(&moxart_wdt->dev);
+ err = devm_watchdog_register_device(dev, &moxart_wdt->dev);
if (err)
return err;
@@ -146,15 +144,6 @@ static int moxart_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int moxart_wdt_remove(struct platform_device *pdev)
-{
- struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
-
- moxart_wdt_stop(&moxart_wdt->dev);
-
- return 0;
-}
-
static const struct of_device_id moxart_watchdog_match[] = {
{ .compatible = "moxa,moxart-watchdog" },
{ },
@@ -163,7 +152,6 @@ MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
static struct platform_driver moxart_wdt_driver = {
.probe = moxart_wdt_probe,
- .remove = moxart_wdt_remove,
.driver = {
.name = "moxart-watchdog",
.of_match_table = moxart_watchdog_match,
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 069072e6747d..9b6d6a5a27ad 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -149,8 +149,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
if (!ddata)
return -ENOMEM;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- ddata->base = devm_ioremap_resource(dev, res);
+ ddata->base = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(ddata->base))
return PTR_ERR(ddata->base);
@@ -205,9 +204,10 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
if (ddata->wdd.timeout < ddata->wdd.min_timeout)
ddata->wdd.timeout = ddata->wdd.min_timeout;
- ret = watchdog_register_device(&ddata->wdd);
+ ret = devm_watchdog_register_device(dev, &ddata->wdd);
if (ret) {
- dev_err(dev, "cannot register watchdog device (err=%d)\n", ret);
+ dev_err(dev, "cannot register watchdog device (err=%d)\n",
+ ret);
return ret;
}
@@ -219,17 +219,6 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
return 0;
}
-static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
-{
- struct mpc8xxx_wdt_ddata *ddata = platform_get_drvdata(ofdev);
-
- dev_crit(&ofdev->dev, "Watchdog removed, expect the %s soon!\n",
- reset ? "reset" : "machine check exception");
- watchdog_unregister_device(&ddata->wdd);
-
- return 0;
-}
-
static const struct of_device_id mpc8xxx_wdt_match[] = {
{
.compatible = "mpc83xx_wdt",
@@ -260,7 +249,6 @@ MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);
static struct platform_driver mpc8xxx_wdt_driver = {
.probe = mpc8xxx_wdt_probe,
- .remove = mpc8xxx_wdt_remove,
.driver = {
.name = "mpc8xxx_wdt",
.of_match_table = mpc8xxx_wdt_match,
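
The platform_get_resource() + devm_ioremap_resource() pairs removed throughout these patches collapse into a single helper. A minimal sketch, assuming resource index 0 as in the conversions above (foo_wdt_map_regs is a hypothetical wrapper):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_wdt_map_regs(struct platform_device *pdev, void __iomem **base)
{
	/*
	 * Looks up IORESOURCE_MEM index 0 and ioremaps it in one call;
	 * failures come back as an ERR_PTR value.
	 */
	*base = devm_platform_ioremap_resource(pdev, 0);
	return PTR_ERR_OR_ZERO(*base);
}
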
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 81208cd3f4ec..cbb3c0dde136 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -133,21 +133,19 @@ static struct watchdog_device mt7621_wdt_dev = {
static int mt7621_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt7621_wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ struct device *dev = &pdev->dev;
+ mt7621_wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mt7621_wdt_base))
return PTR_ERR(mt7621_wdt_base);
- mt7621_wdt_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ mt7621_wdt_reset = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(mt7621_wdt_reset))
reset_control_deassert(mt7621_wdt_reset);
mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause();
watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout,
- &pdev->dev);
+ dev);
watchdog_set_nowayout(&mt7621_wdt_dev, nowayout);
if (mt7621_wdt_is_running(&mt7621_wdt_dev)) {
/*
@@ -164,7 +162,7 @@ static int mt7621_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &mt7621_wdt_dev.status);
}
- return devm_watchdog_register_device(&pdev->dev, &mt7621_wdt_dev);
+ return devm_watchdog_register_device(dev, &mt7621_wdt_dev);
}
static void mt7621_wdt_shutdown(struct platform_device *pdev)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 4baf64f21aa1..9c3d0033260d 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -153,18 +153,17 @@ static const struct watchdog_ops mtk_wdt_ops = {
static int mtk_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct mtk_wdt_dev *mtk_wdt;
- struct resource *res;
int err;
- mtk_wdt = devm_kzalloc(&pdev->dev, sizeof(*mtk_wdt), GFP_KERNEL);
+ mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL);
if (!mtk_wdt)
return -ENOMEM;
platform_set_drvdata(pdev, mtk_wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mtk_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ mtk_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mtk_wdt->wdt_base))
return PTR_ERR(mtk_wdt->wdt_base);
@@ -173,9 +172,9 @@ static int mtk_wdt_probe(struct platform_device *pdev)
mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
- mtk_wdt->wdt_dev.parent = &pdev->dev;
+ mtk_wdt->wdt_dev.parent = dev;
- watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&mtk_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&mtk_wdt->wdt_dev, 128);
@@ -183,29 +182,13 @@ static int mtk_wdt_probe(struct platform_device *pdev)
mtk_wdt_stop(&mtk_wdt->wdt_dev);
- err = watchdog_register_device(&mtk_wdt->wdt_dev);
+ watchdog_stop_on_reboot(&mtk_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &mtk_wdt->wdt_dev);
if (unlikely(err))
return err;
- dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
- mtk_wdt->wdt_dev.timeout, nowayout);
-
- return 0;
-}
-
-static void mtk_wdt_shutdown(struct platform_device *pdev)
-{
- struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
-
- if (watchdog_active(&mtk_wdt->wdt_dev))
- mtk_wdt_stop(&mtk_wdt->wdt_dev);
-}
-
-static int mtk_wdt_remove(struct platform_device *pdev)
-{
- struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&mtk_wdt->wdt_dev);
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
+ mtk_wdt->wdt_dev.timeout, nowayout);
return 0;
}
@@ -247,8 +230,6 @@ static const struct dev_pm_ops mtk_wdt_pm_ops = {
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
- .remove = mtk_wdt_remove,
- .shutdown = mtk_wdt_shutdown,
.driver = {
.name = DRV_NAME,
.pm = &mtk_wdt_pm_ops,
diff --git a/drivers/watchdog/ni903x_wdt.c b/drivers/watchdog/ni903x_wdt.c
index dc67742e9018..fbc1df86c6cc 100644
--- a/drivers/watchdog/ni903x_wdt.c
+++ b/drivers/watchdog/ni903x_wdt.c
@@ -217,9 +217,7 @@ static int ni903x_acpi_add(struct acpi_device *device)
wdd->parent = dev;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
- ret = watchdog_init_timeout(wdd, timeout, dev);
- if (ret)
- dev_err(dev, "unable to set timeout value, using default\n");
+ watchdog_init_timeout(wdd, timeout, dev);
ret = watchdog_register_device(wdd);
if (ret) {
diff --git a/drivers/watchdog/nic7018_wdt.c b/drivers/watchdog/nic7018_wdt.c
index dcd265685837..82843abe38f8 100644
--- a/drivers/watchdog/nic7018_wdt.c
+++ b/drivers/watchdog/nic7018_wdt.c
@@ -211,10 +211,7 @@ static int nic7018_probe(struct platform_device *pdev)
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
-
- ret = watchdog_init_timeout(wdd, timeout, dev);
- if (ret)
- dev_warn(dev, "unable to set timeout value, using default\n");
+ watchdog_init_timeout(wdd, timeout, dev);
/* Unlock WDT register */
outb(UNLOCK, wdt->io_base + WDT_REG_LOCK);
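
ni903x and nic7018 stop checking the return value of watchdog_init_timeout() and drop the accompanying message. watchdog_init_timeout() keeps the driver default when the supplied value is missing or invalid and, in the kernels this series targets, appears to report bad values itself (my reading, not spelled out in the diff), which makes the driver-side warning redundant. A sketch of the simplified call, with hypothetical names:

#include <linux/device.h>
#include <linux/watchdog.h>

static void foo_wdt_setup_timeout(struct watchdog_device *wdd,
				  unsigned int timeout_param,
				  struct device *dev)
{
	/*
	 * The default in wdd->timeout survives an absent or invalid
	 * parameter/DT value, and the core is assumed to log the problem
	 * itself, so there is nothing useful left to do with the return
	 * value here.
	 */
	watchdog_init_timeout(wdd, timeout_param, dev);
}
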
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index 0d4213652ecc..9d6c1689b12c 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -181,16 +181,14 @@ static int npcm_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct npcm_wdt *wdt;
- struct resource *res;
int irq;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->reg = devm_ioremap_resource(dev, res);
+ wdt->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg))
return PTR_ERR(wdt->reg);
@@ -216,8 +214,8 @@ static int npcm_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
- ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0,
- "watchdog", wdt);
+ ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0, "watchdog",
+ wdt);
if (ret)
return ret;
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index 8a36350bab7b..f36eae34e848 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -242,7 +242,6 @@ static struct miscdevice nuc900wdt_miscdev = {
static int nuc900wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
int ret = 0;
nuc900_wdt = devm_kzalloc(&pdev->dev, sizeof(*nuc900_wdt),
@@ -254,8 +253,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
spin_lock_init(&nuc900_wdt->wdt_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nuc900_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ nuc900_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nuc900_wdt->wdt_base))
return PTR_ERR(nuc900_wdt->wdt_base);
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index d3f7eb046678..03786992b701 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -151,43 +151,46 @@ static u32 xwdt_selftest(struct xwdt_device *xdev)
return XWT_TIMER_FAILED;
}
+static void xwdt_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int xwdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int rc;
u32 pfreq = 0, enable_once = 0;
- struct resource *res;
struct xwdt_device *xdev;
struct watchdog_device *xilinx_wdt_wdd;
- xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
xilinx_wdt_wdd->info = &xilinx_wdt_ident;
xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
- xilinx_wdt_wdd->parent = &pdev->dev;
+ xilinx_wdt_wdd->parent = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->base = devm_ioremap_resource(&pdev->dev, res);
+ xdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->base))
return PTR_ERR(xdev->base);
- rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-interval",
+ rc = of_property_read_u32(dev->of_node, "xlnx,wdt-interval",
&xdev->wdt_interval);
if (rc)
- dev_warn(&pdev->dev,
- "Parameter \"xlnx,wdt-interval\" not found\n");
+ dev_warn(dev, "Parameter \"xlnx,wdt-interval\" not found\n");
- rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-enable-once",
+ rc = of_property_read_u32(dev->of_node, "xlnx,wdt-enable-once",
&enable_once);
if (rc)
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"Parameter \"xlnx,wdt-enable-once\" not found\n");
watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
- xdev->clk = devm_clk_get(&pdev->dev, NULL);
+ xdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(xdev->clk)) {
if (PTR_ERR(xdev->clk) != -ENOENT)
return PTR_ERR(xdev->clk);
@@ -198,10 +201,10 @@ static int xwdt_probe(struct platform_device *pdev)
*/
xdev->clk = NULL;
- rc = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ rc = of_property_read_u32(dev->of_node, "clock-frequency",
&pfreq);
if (rc)
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"The watchdog clock freq cannot be obtained\n");
} else {
pfreq = clk_get_rate(xdev->clk);
@@ -220,44 +223,34 @@ static int xwdt_probe(struct platform_device *pdev)
rc = clk_prepare_enable(xdev->clk);
if (rc) {
- dev_err(&pdev->dev, "unable to enable clock\n");
+ dev_err(dev, "unable to enable clock\n");
return rc;
}
+ rc = devm_add_action_or_reset(dev, xwdt_clk_disable_unprepare,
+ xdev->clk);
+ if (rc)
+ return rc;
rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
- dev_err(&pdev->dev, "SelfTest routine error\n");
- goto err_clk_disable;
+ dev_err(dev, "SelfTest routine error\n");
+ return rc;
}
- rc = watchdog_register_device(xilinx_wdt_wdd);
+ rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
if (rc) {
- dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc);
- goto err_clk_disable;
+ dev_err(dev, "Cannot register watchdog (err=%d)\n", rc);
+ return rc;
}
clk_disable(xdev->clk);
- dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
+ dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
xdev->base, xilinx_wdt_wdd->timeout);
platform_set_drvdata(pdev, xdev);
return 0;
-err_clk_disable:
- clk_disable_unprepare(xdev->clk);
-
- return rc;
-}
-
-static int xwdt_remove(struct platform_device *pdev)
-{
- struct xwdt_device *xdev = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&xdev->xilinx_wdt_wdd);
- clk_disable_unprepare(xdev->clk);
-
- return 0;
}
/**
@@ -305,7 +298,6 @@ MODULE_DEVICE_TABLE(of, xwdt_of_match);
static struct platform_driver xwdt_driver = {
.probe = xwdt_probe,
- .remove = xwdt_remove,
.driver = {
.name = WATCHDOG_NAME,
.of_match_table = xwdt_of_match,
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index cbd752f9ac56..d49688d93f6a 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -231,7 +231,6 @@ static const struct watchdog_ops omap_wdt_ops = {
static int omap_wdt_probe(struct platform_device *pdev)
{
struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct resource *res;
struct omap_wdt_dev *wdev;
int ret;
@@ -245,8 +244,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
mutex_init(&wdev->lock);
/* reserve static register mappings */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdev->base = devm_ioremap_resource(&pdev->dev, res);
+ wdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdev->base))
return PTR_ERR(wdev->base);
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 9db3b09f7568..cdb0d174c5e2 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -349,13 +349,6 @@ static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
}
-static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev,
- unsigned int timeout)
-{
- wdt_dev->timeout = timeout;
- return 0;
-}
-
static const struct watchdog_info orion_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "Orion Watchdog",
@@ -366,7 +359,6 @@ static const struct watchdog_ops orion_wdt_ops = {
.start = orion_wdt_start,
.stop = orion_wdt_stop,
.ping = orion_wdt_ping,
- .set_timeout = orion_wdt_set_timeout,
.get_timeleft = orion_wdt_get_timeleft,
};
@@ -502,8 +494,7 @@ static int orion_wdt_get_regs(struct platform_device *pdev,
of_device_is_compatible(node, "marvell,armada-xp-wdt")) {
/* Dedicated RSTOUT register, can be requested. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dev->rstout = devm_ioremap_resource(&pdev->dev, res);
+ dev->rstout = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dev->rstout))
return PTR_ERR(dev->rstout);
@@ -511,8 +502,7 @@ static int orion_wdt_get_regs(struct platform_device *pdev,
of_device_is_compatible(node, "marvell,armada-380-wdt")) {
/* Dedicated RSTOUT register, can be requested. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dev->rstout = devm_ioremap_resource(&pdev->dev, res);
+ dev->rstout = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dev->rstout))
return PTR_ERR(dev->rstout);
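
The orion change removes a .set_timeout callback whose entire body stored the new value in wdt_dev->timeout. The watchdog core appears to do exactly that on its own when WDIOF_SETTIMEOUT is advertised but ops->set_timeout is NULL (assumed here, not shown in the diff), so the callback added nothing. A hedged sketch of the resulting ops table for a made-up foo_wdt:

/* Sketch only; foo_wdt_* are stand-ins, not the orion_wdt symbols. */
#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd) { return 0; }
static int foo_wdt_stop(struct watchdog_device *wdd) { return 0; }
static int foo_wdt_ping(struct watchdog_device *wdd) { return 0; }

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop = foo_wdt_stop,
	.ping = foo_wdt_ping,
	/*
	 * No .set_timeout: with WDIOF_SETTIMEOUT advertised, the core is
	 * assumed to store the new value in wdd->timeout itself, which is
	 * all the removed callback did.
	 */
};
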
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index c797305f8338..9a3c53e03c60 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -168,70 +168,61 @@ static struct watchdog_device pic32_dmt_wdd = {
.ops = &pic32_dmt_fops,
};
+static void pic32_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pic32_dmt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct pic32_dmt *dmt;
- struct resource *mem;
struct watchdog_device *wdd = &pic32_dmt_wdd;
- dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL);
+ dmt = devm_kzalloc(dev, sizeof(*dmt), GFP_KERNEL);
if (!dmt)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmt->regs = devm_ioremap_resource(&pdev->dev, mem);
+ dmt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmt->regs))
return PTR_ERR(dmt->regs);
- dmt->clk = devm_clk_get(&pdev->dev, NULL);
+ dmt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(dmt->clk)) {
- dev_err(&pdev->dev, "clk not found\n");
+ dev_err(dev, "clk not found\n");
return PTR_ERR(dmt->clk);
}
ret = clk_prepare_enable(dmt->clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, pic32_clk_disable_unprepare,
+ dmt->clk);
+ if (ret)
+ return ret;
wdd->timeout = pic32_dmt_get_timeout_secs(dmt);
if (!wdd->timeout) {
- dev_err(&pdev->dev,
- "failed to read watchdog register timeout\n");
- ret = -EINVAL;
- goto out_disable_clk;
+ dev_err(dev, "failed to read watchdog register timeout\n");
+ return -EINVAL;
}
- dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+ dev_info(dev, "timeout %d\n", wdd->timeout);
wdd->bootstatus = pic32_dmt_bootstatus(dmt) ? WDIOF_CARDRESET : 0;
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdd, dmt);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
- goto out_disable_clk;
+ dev_err(dev, "watchdog register failed, err %d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, wdd);
return 0;
-
-out_disable_clk:
- clk_disable_unprepare(dmt->clk);
- return ret;
-}
-
-static int pic32_dmt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
-
- watchdog_unregister_device(wdd);
- clk_disable_unprepare(dmt->clk);
-
- return 0;
}
static const struct of_device_id pic32_dmt_of_ids[] = {
@@ -242,7 +233,6 @@ MODULE_DEVICE_TABLE(of, pic32_dmt_of_ids);
static struct platform_driver pic32_dmt_driver = {
.probe = pic32_dmt_probe,
- .remove = pic32_dmt_remove,
.driver = {
.name = "pic32-dmt",
.of_match_table = of_match_ptr(pic32_dmt_of_ids),
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index e2761068dc6f..540500940cc0 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -166,89 +166,77 @@ static const struct of_device_id pic32_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, pic32_wdt_dt_ids);
+static void pic32_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pic32_wdt_drv_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct watchdog_device *wdd = &pic32_wdd;
struct pic32_wdt *wdt;
- struct resource *mem;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->regs = devm_ioremap_resource(&pdev->dev, mem);
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->regs))
return PTR_ERR(wdt->regs);
- wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10);
+ wdt->rst_base = devm_ioremap(dev, PIC32_BASE_RESET, 0x10);
if (!wdt->rst_base)
return -ENOMEM;
- wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(&pdev->dev, "clk not found\n");
+ dev_err(dev, "clk not found\n");
return PTR_ERR(wdt->clk);
}
ret = clk_prepare_enable(wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "clk enable failed\n");
+ dev_err(dev, "clk enable failed\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, pic32_clk_disable_unprepare,
+ wdt->clk);
+ if (ret)
+ return ret;
if (pic32_wdt_is_win_enabled(wdt)) {
- dev_err(&pdev->dev, "windowed-clear mode is not supported.\n");
- ret = -ENODEV;
- goto out_disable_clk;
+ dev_err(dev, "windowed-clear mode is not supported.\n");
+ return -ENODEV;
}
- wdd->timeout = pic32_wdt_get_timeout_secs(wdt, &pdev->dev);
+ wdd->timeout = pic32_wdt_get_timeout_secs(wdt, dev);
if (!wdd->timeout) {
- dev_err(&pdev->dev,
- "failed to read watchdog register timeout\n");
- ret = -EINVAL;
- goto out_disable_clk;
+ dev_err(dev, "failed to read watchdog register timeout\n");
+ return -EINVAL;
}
- dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+ dev_info(dev, "timeout %d\n", wdd->timeout);
wdd->bootstatus = pic32_wdt_bootstatus(wdt) ? WDIOF_CARDRESET : 0;
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdd, wdt);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
- goto out_disable_clk;
+ dev_err(dev, "watchdog register failed, err %d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, wdd);
return 0;
-
-out_disable_clk:
- clk_disable_unprepare(wdt->clk);
-
- return ret;
-}
-
-static int pic32_wdt_drv_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
- struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
-
- watchdog_unregister_device(wdd);
- clk_disable_unprepare(wdt->clk);
-
- return 0;
}
static struct platform_driver pic32_wdt_driver = {
.probe = pic32_wdt_drv_probe,
- .remove = pic32_wdt_drv_remove,
.driver = {
.name = "pic32-wdt",
.of_match_table = of_match_ptr(pic32_wdt_dt_ids),
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 7f10041fcf5b..2d3652004e39 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -132,15 +132,16 @@ static const struct watchdog_ops pm8916_wdt_ops = {
static int pm8916_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct pm8916_wdt *wdt;
struct device *parent;
int err, irq;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- parent = pdev->dev.parent;
+ parent = dev->parent;
/*
* The pm8916-pon-wdt is a child of the pon device, which is a child
@@ -150,20 +151,20 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
*/
wdt->regmap = dev_get_regmap(parent->parent, NULL);
if (!wdt->regmap) {
- dev_err(&pdev->dev, "failed to locate regmap\n");
+ dev_err(dev, "failed to locate regmap\n");
return -ENODEV;
}
err = device_property_read_u32(parent, "reg", &wdt->baseaddr);
if (err) {
- dev_err(&pdev->dev, "failed to get pm8916-pon address\n");
+ dev_err(dev, "failed to get pm8916-pon address\n");
return err;
}
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- if (devm_request_irq(&pdev->dev, irq, pm8916_wdt_isr, 0,
- "pm8916_wdt", wdt))
+ if (devm_request_irq(dev, irq, pm8916_wdt_isr, 0, "pm8916_wdt",
+ wdt))
irq = 0;
}
@@ -172,23 +173,23 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL,
RESET_TYPE_HARD);
if (err) {
- dev_err(&pdev->dev, "failed configure watchdog\n");
+ dev_err(dev, "failed configure watchdog\n");
return err;
}
wdt->wdev.info = (irq > 0) ? &pm8916_wdt_pt_ident : &pm8916_wdt_ident,
wdt->wdev.ops = &pm8916_wdt_ops,
- wdt->wdev.parent = &pdev->dev;
+ wdt->wdev.parent = dev;
wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
wdt->wdev.max_timeout = PM8916_WDT_MAX_TIMEOUT;
wdt->wdev.timeout = PM8916_WDT_DEFAULT_TIMEOUT;
wdt->wdev.pretimeout = 0;
watchdog_set_drvdata(&wdt->wdev, wdt);
- watchdog_init_timeout(&wdt->wdev, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdev, 0, dev);
pm8916_wdt_configure_timers(&wdt->wdev);
- return devm_watchdog_register_device(&pdev->dev, &wdt->wdev);
+ return devm_watchdog_register_device(dev, &wdt->wdev);
}
static const struct of_device_id pm8916_wdt_id_table[] = {
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 8e261799c84e..d9e03544aeae 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -183,54 +183,50 @@ static struct watchdog_device pnx4008_wdd = {
.max_timeout = MAX_HEARTBEAT,
};
+static void pnx4008_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int pnx4008_wdt_probe(struct platform_device *pdev)
{
- struct resource *r;
+ struct device *dev = &pdev->dev;
int ret = 0;
- watchdog_init_timeout(&pnx4008_wdd, heartbeat, &pdev->dev);
+ watchdog_init_timeout(&pnx4008_wdd, heartbeat, dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_ioremap_resource(&pdev->dev, r);
+ wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
- wdt_clk = devm_clk_get(&pdev->dev, NULL);
+ wdt_clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
ret = clk_prepare_enable(wdt_clk);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, pnx4008_clk_disable_unprepare,
+ wdt_clk);
+ if (ret)
+ return ret;
pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
WDIOF_CARDRESET : 0;
- pnx4008_wdd.parent = &pdev->dev;
+ pnx4008_wdd.parent = dev;
watchdog_set_nowayout(&pnx4008_wdd, nowayout);
watchdog_set_restart_priority(&pnx4008_wdd, 128);
- pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */
+ if (readl(WDTIM_CTRL(wdt_base)) & COUNT_ENAB)
+ set_bit(WDOG_HW_RUNNING, &pnx4008_wdd.status);
- ret = watchdog_register_device(&pnx4008_wdd);
+ ret = devm_watchdog_register_device(dev, &pnx4008_wdd);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot register watchdog device\n");
- goto disable_clk;
+ dev_err(dev, "cannot register watchdog device\n");
+ return ret;
}
- dev_info(&pdev->dev, "heartbeat %d sec\n", pnx4008_wdd.timeout);
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(wdt_clk);
- return ret;
-}
-
-static int pnx4008_wdt_remove(struct platform_device *pdev)
-{
- watchdog_unregister_device(&pnx4008_wdd);
-
- clk_disable_unprepare(wdt_clk);
+ dev_info(dev, "heartbeat %d sec\n", pnx4008_wdd.timeout);
return 0;
}
@@ -249,7 +245,6 @@ static struct platform_driver platform_wdt_driver = {
.of_match_table = of_match_ptr(pnx4008_wdt_match),
},
.probe = pnx4008_wdt_probe,
- .remove = pnx4008_wdt_remove,
};
module_platform_driver(platform_wdt_driver);
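
The pnx4008 conversion also changes probe behaviour slightly: instead of unconditionally stopping the hardware, it tells the core when the watchdog is already running (the same pattern mt7621 and npcm use above), so the core keeps it fed until userspace opens the device. A hedged sketch, assuming a hypothetical enable bit in a made-up control register:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/watchdog.h>

/* Hypothetical register layout, only for illustration. */
#define FOO_WDT_CTRL		0x00
#define FOO_WDT_CTRL_ENABLE	BIT(0)

static void foo_wdt_check_running(struct watchdog_device *wdd,
				  void __iomem *base)
{
	/*
	 * If the boot loader left the watchdog running, tell the core: it
	 * will keep pinging the hardware until userspace opens the device,
	 * instead of the driver silently disabling a live watchdog at
	 * probe time.
	 */
	if (readl(base + FOO_WDT_CTRL) & FOO_WDT_CTRL_ENABLE)
		set_bit(WDOG_HW_RUNNING, &wdd->status);
}
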
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 5dfd604477a4..6d29c33b1316 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -142,22 +142,28 @@ static const struct watchdog_info qcom_wdt_info = {
.identity = KBUILD_MODNAME,
};
+static void qcom_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int qcom_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct qcom_wdt *wdt;
struct resource *res;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
const u32 *regs;
u32 percpu_offset;
int ret;
- regs = of_device_get_match_data(&pdev->dev);
+ regs = of_device_get_match_data(dev);
if (!regs) {
- dev_err(&pdev->dev, "Unsupported QCOM WDT module\n");
+ dev_err(dev, "Unsupported QCOM WDT module\n");
return -ENODEV;
}
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -172,21 +178,25 @@ static int qcom_wdt_probe(struct platform_device *pdev)
res->start += percpu_offset;
res->end += percpu_offset;
- wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ wdt->base = devm_ioremap_resource(dev, res);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(&pdev->dev, "failed to get input clock\n");
+ dev_err(dev, "failed to get input clock\n");
return PTR_ERR(wdt->clk);
}
ret = clk_prepare_enable(wdt->clk);
if (ret) {
- dev_err(&pdev->dev, "failed to setup clock\n");
+ dev_err(dev, "failed to setup clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, qcom_clk_disable_unprepare,
+ wdt->clk);
+ if (ret)
+ return ret;
/*
* We use the clock rate to calculate the max timeout, so ensure it's
@@ -199,16 +209,15 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->rate = clk_get_rate(wdt->clk);
if (wdt->rate == 0 ||
wdt->rate > 0x10000000U) {
- dev_err(&pdev->dev, "invalid clock rate\n");
- ret = -EINVAL;
- goto err_clk_unprepare;
+ dev_err(dev, "invalid clock rate\n");
+ return -EINVAL;
}
wdt->wdd.info = &qcom_wdt_info;
wdt->wdd.ops = &qcom_wdt_ops;
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->layout = regs;
if (readl(wdt_addr(wdt, WDT_STS)) & 1)
@@ -220,29 +229,16 @@ static int qcom_wdt_probe(struct platform_device *pdev)
* the max instead.
*/
wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U);
- watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
- ret = watchdog_register_device(&wdt->wdd);
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog\n");
- goto err_clk_unprepare;
+ dev_err(dev, "failed to register watchdog\n");
+ return ret;
}
platform_set_drvdata(pdev, wdt);
return 0;
-
-err_clk_unprepare:
- clk_disable_unprepare(wdt->clk);
- return ret;
-}
-
-static int qcom_wdt_remove(struct platform_device *pdev)
-{
- struct qcom_wdt *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk);
- return 0;
}
static int __maybe_unused qcom_wdt_suspend(struct device *dev)
@@ -277,7 +273,6 @@ MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
static struct platform_driver qcom_watchdog_driver = {
.probe = qcom_wdt_probe,
- .remove = qcom_wdt_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qcom_wdt_of_table,
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 622ede529912..565dbc1ec638 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -151,7 +151,6 @@ static const struct soc_device_attribute rwdt_quirks_match[] = {
.data = (void *)1, /* needs single CPU */
}, {
.soc_id = "r8a7792",
- .revision = "*",
.data = (void *)0, /* needs SMP disabled */
},
{ /* sentinel */ }
@@ -177,7 +176,6 @@ static inline bool rwdt_blacklisted(struct device *dev) { return false; }
static int rwdt_probe(struct platform_device *pdev)
{
struct rwdt_priv *priv;
- struct resource *res;
struct clk *clk;
unsigned long clks_per_sec;
int ret, i;
@@ -189,8 +187,7 @@ static int rwdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -238,9 +235,7 @@ static int rwdt_probe(struct platform_device *pdev)
watchdog_stop_on_unregister(&priv->wdev);
/* This overrides the default timeout only if DT configuration was found */
- ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
- if (ret)
- dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n");
+ watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
ret = watchdog_register_device(&priv->wdev);
if (ret < 0)
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index e60f55702ab7..21fcb36f9074 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -146,11 +146,12 @@ static const struct watchdog_ops rn5t618_wdt_ops = {
static int rn5t618_wdt_probe(struct platform_device *pdev)
{
- struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct rn5t618 *rn5t618 = dev_get_drvdata(dev->parent);
struct rn5t618_wdt *wdt;
int min_timeout, max_timeout;
- wdt = devm_kzalloc(&pdev->dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -163,10 +164,10 @@ static int rn5t618_wdt_probe(struct platform_device *pdev)
wdt->wdt_dev.min_timeout = min_timeout;
wdt->wdt_dev.max_timeout = max_timeout;
wdt->wdt_dev.timeout = max_timeout;
- wdt->wdt_dev.parent = &pdev->dev;
+ wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&wdt->wdt_dev, wdt);
- watchdog_init_timeout(&wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&wdt->wdt_dev, nowayout);
platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index db7c57d82cfd..905e60f45eec 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -141,19 +141,18 @@ static struct watchdog_device rt288x_wdt_dev = {
static int rt288x_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rt288x_wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ rt288x_wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rt288x_wdt_base))
return PTR_ERR(rt288x_wdt_base);
- rt288x_wdt_clk = devm_clk_get(&pdev->dev, NULL);
+ rt288x_wdt_clk = devm_clk_get(dev, NULL);
if (IS_ERR(rt288x_wdt_clk))
return PTR_ERR(rt288x_wdt_clk);
- rt288x_wdt_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ rt288x_wdt_reset = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(rt288x_wdt_reset))
reset_control_deassert(rt288x_wdt_reset);
@@ -161,31 +160,20 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
- rt288x_wdt_dev.parent = &pdev->dev;
+ rt288x_wdt_dev.parent = dev;
watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
- &pdev->dev);
+ dev);
watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
- ret = watchdog_register_device(&rt288x_wdt_dev);
+ watchdog_stop_on_reboot(&rt288x_wdt_dev);
+ ret = devm_watchdog_register_device(dev, &rt288x_wdt_dev);
if (!ret)
- dev_info(&pdev->dev, "Initialized\n");
+ dev_info(dev, "Initialized\n");
return 0;
}
-static int rt288x_wdt_remove(struct platform_device *pdev)
-{
- watchdog_unregister_device(&rt288x_wdt_dev);
-
- return 0;
-}
-
-static void rt288x_wdt_shutdown(struct platform_device *pdev)
-{
- rt288x_wdt_stop(&rt288x_wdt_dev);
-}
-
static const struct of_device_id rt288x_wdt_match[] = {
{ .compatible = "ralink,rt2880-wdt" },
{},
@@ -194,8 +182,6 @@ MODULE_DEVICE_TABLE(of, rt288x_wdt_match);
static struct platform_driver rt288x_wdt_driver = {
.probe = rt288x_wdt_probe,
- .remove = rt288x_wdt_remove,
- .shutdown = rt288x_wdt_shutdown,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = rt288x_wdt_match,
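
Here the explicit .shutdown and .remove callbacks give way to core helpers: watchdog_stop_on_reboot() asks the core to stop the hardware from its reboot notifier, and watchdog_stop_on_unregister() (used by other drivers in this series) stops it when the watchdog is torn down. A small sketch of the registration tail of a probe function, with dev and wdd assumed to come from the caller:

#include <linux/device.h>
#include <linux/watchdog.h>

static int foo_wdt_register(struct device *dev, struct watchdog_device *wdd)
{
	/* have the core stop the hardware on reboot... */
	watchdog_stop_on_reboot(wdd);
	/* ...and when the (devm-managed) watchdog is unregistered */
	watchdog_stop_on_unregister(wdd);

	/*
	 * With these flags plus devm registration, the platform driver
	 * needs neither .shutdown nor .remove for the watchdog itself.
	 */
	return devm_watchdog_register_device(dev, wdd);
}
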
diff --git a/drivers/watchdog/rtd119x_wdt.c b/drivers/watchdog/rtd119x_wdt.c
index d001c17ddfde..834b94ff3f90 100644
--- a/drivers/watchdog/rtd119x_wdt.c
+++ b/drivers/watchdog/rtd119x_wdt.c
@@ -9,7 +9,6 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -95,37 +94,43 @@ static const struct of_device_id rtd119x_wdt_dt_ids[] = {
{ }
};
+static void rtd119x_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int rtd119x_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rtd119x_watchdog_device *data;
- struct resource *res;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->clk = of_clk_get(pdev->dev.of_node, 0);
+ data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
- if (ret) {
- clk_put(data->clk);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, rtd119x_clk_disable_unprepare,
+ data->clk);
+ if (ret)
return ret;
- }
data->wdt_dev.info = &rtd119x_wdt_info;
data->wdt_dev.ops = &rtd119x_wdt_ops;
data->wdt_dev.timeout = 120;
data->wdt_dev.max_timeout = 0xffffffff / clk_get_rate(data->clk);
data->wdt_dev.min_timeout = 1;
- data->wdt_dev.parent = &pdev->dev;
+ data->wdt_dev.parent = dev;
watchdog_stop_on_reboot(&data->wdt_dev);
watchdog_set_drvdata(&data->wdt_dev, data);
@@ -135,31 +140,11 @@ static int rtd119x_wdt_probe(struct platform_device *pdev)
rtd119x_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
rtd119x_wdt_stop(&data->wdt_dev);
- ret = devm_watchdog_register_device(&pdev->dev, &data->wdt_dev);
- if (ret) {
- clk_disable_unprepare(data->clk);
- clk_put(data->clk);
- return ret;
- }
-
- return 0;
-}
-
-static int rtd119x_wdt_remove(struct platform_device *pdev)
-{
- struct rtd119x_watchdog_device *data = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&data->wdt_dev);
-
- clk_disable_unprepare(data->clk);
- clk_put(data->clk);
-
- return 0;
+ return devm_watchdog_register_device(dev, &data->wdt_dev);
}
static struct platform_driver rtd119x_wdt_driver = {
.probe = rtd119x_wdt_probe,
- .remove = rtd119x_wdt_remove,
.driver = {
.name = "rtd1295-watchdog",
.of_match_table = rtd119x_wdt_dt_ids,
diff --git a/drivers/watchdog/rza_wdt.c b/drivers/watchdog/rza_wdt.c
index 781bb572e6af..7b6c365f7cd3 100644
--- a/drivers/watchdog/rza_wdt.c
+++ b/drivers/watchdog/rza_wdt.c
@@ -166,35 +166,34 @@ static const struct watchdog_ops rza_wdt_ops = {
static int rza_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rza_wdt *priv;
- struct resource *res;
unsigned long rate;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
rate = clk_get_rate(priv->clk);
if (rate < 16384) {
- dev_err(&pdev->dev, "invalid clock rate (%ld)\n", rate);
+ dev_err(dev, "invalid clock rate (%ld)\n", rate);
return -ENOENT;
}
priv->wdev.info = &rza_wdt_ident,
priv->wdev.ops = &rza_wdt_ops,
- priv->wdev.parent = &pdev->dev;
+ priv->wdev.parent = dev;
- priv->cks = (u8)(uintptr_t)of_device_get_match_data(&pdev->dev);
+ priv->cks = (u8)(uintptr_t) of_device_get_match_data(dev);
if (priv->cks == CKS_4BIT) {
/* Assume slowest clock rate possible (CKS=0xF) */
priv->wdev.max_timeout = (DIVIDER_4BIT * U8_MAX) / rate;
@@ -209,19 +208,19 @@ static int rza_wdt_probe(struct platform_device *pdev)
* max_hw_heartbeat_ms.
*/
priv->wdev.max_hw_heartbeat_ms = (1000 * U8_MAX) / rate;
- dev_dbg(&pdev->dev, "max hw timeout of %dms\n",
- priv->wdev.max_hw_heartbeat_ms);
+ dev_dbg(dev, "max hw timeout of %dms\n",
+ priv->wdev.max_hw_heartbeat_ms);
}
priv->wdev.min_timeout = 1;
priv->wdev.timeout = DEFAULT_TIMEOUT;
- watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
+ watchdog_init_timeout(&priv->wdev, 0, dev);
watchdog_set_drvdata(&priv->wdev, priv);
- ret = devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+ ret = devm_watchdog_register_device(dev, &priv->wdev);
if (ret)
- dev_err(&pdev->dev, "Cannot register watchdog device\n");
+ dev_err(dev, "Cannot register watchdog device\n");
return ret;
}
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index adaa43543f0a..4267b9e8734b 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -522,7 +522,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct s3c2410_wdt *wdt;
- struct resource *wdt_mem;
struct resource *wdt_irq;
unsigned int wtcon;
int started = 0;
@@ -554,8 +553,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
/* get the memory region for the watchdog timer */
- wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->reg_base = devm_ioremap_resource(dev, wdt_mem);
+ wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg_base)) {
ret = PTR_ERR(wdt->reg_base);
goto err;
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index 1e93c1b0e3cf..111695223aae 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -199,15 +199,15 @@ static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
static int sama5d4_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct sama5d4_wdt *wdt;
- struct resource *res;
void __iomem *regs;
u32 irq = 0;
u32 timeout;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -221,33 +221,31 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(wdd, wdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
wdt->reg_base = regs;
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
- dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
+ dev_warn(dev, "failed to get IRQ from DT\n");
- ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
+ ret = of_sama5d4_wdt_init(dev->of_node, wdt);
if (ret)
return ret;
if ((wdt->mr & AT91_WDT_WDFIEN) && irq) {
- ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler,
+ ret = devm_request_irq(dev, irq, sama5d4_wdt_irq_handler,
IRQF_SHARED | IRQF_IRQPOLL |
IRQF_NO_SUSPEND, pdev->name, pdev);
if (ret) {
- dev_err(&pdev->dev,
- "cannot register interrupt handler\n");
+ dev_err(dev, "cannot register interrupt handler\n");
return ret;
}
}
- watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
+ watchdog_init_timeout(wdd, wdt_timeout, dev);
timeout = WDT_SEC2TICKS(wdd->timeout);
@@ -260,31 +258,21 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdd, nowayout);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
+ dev_err(dev, "failed to register watchdog device\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
+ dev_info(dev, "initialized (timeout = %d sec, nowayout = %d)\n",
wdd->timeout, nowayout);
return 0;
}
-static int sama5d4_wdt_remove(struct platform_device *pdev)
-{
- struct sama5d4_wdt *wdt = platform_get_drvdata(pdev);
-
- sama5d4_wdt_stop(&wdt->wdd);
-
- watchdog_unregister_device(&wdt->wdd);
-
- return 0;
-}
-
static const struct of_device_id sama5d4_wdt_of_match[] = {
{ .compatible = "atmel,sama5d4-wdt", },
{ }
@@ -312,7 +300,6 @@ static SIMPLE_DEV_PM_OPS(sama5d4_wdt_pm_ops, NULL,
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
- .remove = sama5d4_wdt_remove,
.driver = {
.name = "sama5d4_wdt",
.pm = &sama5d4_wdt_pm_ops,
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 0692d42e5c67..5a6ced7a7e8f 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(sbwd_lock);
*
* wdog is the iomem address of the cfg register
*/
-void sbwdog_set(char __iomem *wdog, unsigned long t)
+static void sbwdog_set(char __iomem *wdog, unsigned long t)
{
spin_lock(&sbwd_lock);
__raw_writeb(0, wdog);
@@ -81,7 +81,7 @@ void sbwdog_set(char __iomem *wdog, unsigned long t)
*
* wdog is the iomem address of the cfg register
*/
-void sbwdog_pet(char __iomem *wdog)
+static void sbwdog_pet(char __iomem *wdog)
{
spin_lock(&sbwd_lock);
__raw_writeb(__raw_readb(wdog) | 1, wdog);
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index e221e47396ab..3219422f67a9 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -231,7 +231,6 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct sbsa_gwdt *gwdt;
- struct resource *res;
int ret, irq;
u32 status;
@@ -240,13 +239,11 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, gwdt);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cf_base = devm_ioremap_resource(dev, res);
+ cf_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cf_base))
return PTR_ERR(cf_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- rf_base = devm_ioremap_resource(dev, res);
+ rf_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(rf_base))
return PTR_ERR(rf_base);
@@ -313,7 +310,8 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
*/
sbsa_gwdt_set_timeout(wdd, wdd->timeout);
- ret = watchdog_register_device(wdd);
+ watchdog_stop_on_reboot(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
@@ -324,22 +322,6 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
return 0;
}
-static void sbsa_gwdt_shutdown(struct platform_device *pdev)
-{
- struct sbsa_gwdt *gwdt = platform_get_drvdata(pdev);
-
- sbsa_gwdt_stop(&gwdt->wdd);
-}
-
-static int sbsa_gwdt_remove(struct platform_device *pdev)
-{
- struct sbsa_gwdt *gwdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&gwdt->wdd);
-
- return 0;
-}
-
/* Disable watchdog if it is active during suspend */
static int __maybe_unused sbsa_gwdt_suspend(struct device *dev)
{
@@ -385,8 +367,6 @@ static struct platform_driver sbsa_gwdt_driver = {
.of_match_table = sbsa_gwdt_of_match,
},
.probe = sbsa_gwdt_probe,
- .remove = sbsa_gwdt_remove,
- .shutdown = sbsa_gwdt_shutdown,
.id_table = sbsa_gwdt_pdev_match,
};
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index a7d6425db807..e7617b7df70b 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -220,7 +220,6 @@ static struct watchdog_device sh_wdt_dev = {
static int sh_wdt_probe(struct platform_device *pdev)
{
struct sh_wdt *wdt;
- struct resource *res;
int rc;
/*
@@ -245,8 +244,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
wdt->clk = NULL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(wdt->dev, res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index ac0c9d2c4aee..e79a4097d50b 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -146,22 +146,23 @@ static struct watchdog_device sirfsoc_wdd = {
static int sirfsoc_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct device *dev = &pdev->dev;
int ret;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base);
- watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(&sirfsoc_wdd, timeout, dev);
watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
- sirfsoc_wdd.parent = &pdev->dev;
+ sirfsoc_wdd.parent = dev;
- ret = watchdog_register_device(&sirfsoc_wdd);
+ watchdog_stop_on_reboot(&sirfsoc_wdd);
+ watchdog_stop_on_unregister(&sirfsoc_wdd);
+ ret = devm_watchdog_register_device(dev, &sirfsoc_wdd);
if (ret)
return ret;
@@ -170,19 +171,6 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev)
return 0;
}
-static void sirfsoc_wdt_shutdown(struct platform_device *pdev)
-{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
-
- sirfsoc_wdt_disable(wdd);
-}
-
-static int sirfsoc_wdt_remove(struct platform_device *pdev)
-{
- sirfsoc_wdt_shutdown(pdev);
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int sirfsoc_wdt_suspend(struct device *dev)
{
@@ -220,8 +208,6 @@ static struct platform_driver sirfsoc_wdt_driver = {
.of_match_table = sirfsoc_wdt_of_match,
},
.probe = sirfsoc_wdt_probe,
- .remove = sirfsoc_wdt_remove,
- .shutdown = sirfsoc_wdt_shutdown,
};
module_platform_driver(sirfsoc_wdt_driver);
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 41aaae2d5287..553735b256e2 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -395,9 +395,7 @@ static int sp5100_tco_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_timeout = 0xffff;
- if (watchdog_init_timeout(wdd, heartbeat, NULL))
- dev_info(dev, "timeout value invalid, using %d\n",
- wdd->timeout);
+ watchdog_init_timeout(wdd, heartbeat, NULL);
watchdog_set_nowayout(wdd, nowayout);
watchdog_stop_on_reboot(wdd);
watchdog_stop_on_unregister(wdd);
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index ff9397d9638a..14874e9b207b 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -245,9 +245,7 @@ static u32 sprd_wdt_get_timeleft(struct watchdog_device *wdd)
u32 val;
val = sprd_wdt_get_cnt_value(wdt);
- val = val / SPRD_WDT_CNT_STEP;
-
- return val;
+ return val / SPRD_WDT_CNT_STEP;
}
static const struct watchdog_ops sprd_wdt_ops = {
@@ -269,70 +267,68 @@ static const struct watchdog_info sprd_wdt_info = {
static int sprd_wdt_probe(struct platform_device *pdev)
{
- struct resource *wdt_res;
+ struct device *dev = &pdev->dev;
struct sprd_wdt *wdt;
int ret;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(&pdev->dev, wdt_res);
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->enable = devm_clk_get(&pdev->dev, "enable");
+ wdt->enable = devm_clk_get(dev, "enable");
if (IS_ERR(wdt->enable)) {
- dev_err(&pdev->dev, "can't get the enable clock\n");
+ dev_err(dev, "can't get the enable clock\n");
return PTR_ERR(wdt->enable);
}
- wdt->rtc_enable = devm_clk_get(&pdev->dev, "rtc_enable");
+ wdt->rtc_enable = devm_clk_get(dev, "rtc_enable");
if (IS_ERR(wdt->rtc_enable)) {
- dev_err(&pdev->dev, "can't get the rtc enable clock\n");
+ dev_err(dev, "can't get the rtc enable clock\n");
return PTR_ERR(wdt->rtc_enable);
}
wdt->irq = platform_get_irq(pdev, 0);
if (wdt->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ resource\n");
+ dev_err(dev, "failed to get IRQ resource\n");
return wdt->irq;
}
- ret = devm_request_irq(&pdev->dev, wdt->irq, sprd_wdt_isr,
- IRQF_NO_SUSPEND, "sprd-wdt", (void *)wdt);
+ ret = devm_request_irq(dev, wdt->irq, sprd_wdt_isr, IRQF_NO_SUSPEND,
+ "sprd-wdt", (void *)wdt);
if (ret) {
- dev_err(&pdev->dev, "failed to register irq\n");
+ dev_err(dev, "failed to register irq\n");
return ret;
}
wdt->wdd.info = &sprd_wdt_info;
wdt->wdd.ops = &sprd_wdt_ops;
- wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.parent = dev;
wdt->wdd.min_timeout = SPRD_WDT_MIN_TIMEOUT;
wdt->wdd.max_timeout = SPRD_WDT_MAX_TIMEOUT;
wdt->wdd.timeout = SPRD_WDT_MAX_TIMEOUT;
ret = sprd_wdt_enable(wdt);
if (ret) {
- dev_err(&pdev->dev, "failed to enable wdt\n");
+ dev_err(dev, "failed to enable wdt\n");
return ret;
}
- ret = devm_add_action(&pdev->dev, sprd_wdt_disable, wdt);
+ ret = devm_add_action_or_reset(dev, sprd_wdt_disable, wdt);
if (ret) {
- sprd_wdt_disable(wdt);
- dev_err(&pdev->dev, "Failed to add wdt disable action\n");
+ dev_err(dev, "Failed to add wdt disable action\n");
return ret;
}
watchdog_set_nowayout(&wdt->wdd, WATCHDOG_NOWAYOUT);
- watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret) {
sprd_wdt_disable(wdt);
- dev_err(&pdev->dev, "failed to register watchdog\n");
+ dev_err(dev, "failed to register watchdog\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
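sprd_wdt switches from devm_add_action(), which needed a manual sprd_wdt_disable() call in its error branch, to devm_add_action_or_reset(), which runs the callback itself when it cannot be recorded. The same idiom, usually with a small void wrapper around clk_disable_unprepare(), recurs in st_lpc_wdt, stm32_iwdg, tangox_wdt and zx2967_wdt further down. A generic sketch with hypothetical names:

#include <linux/device.h>
#include <linux/io.h>

struct example_priv {
	void __iomem *base;
};

/* devres action: undo whatever example_enable() turned on. */
static void example_hw_disable(void *data)
{
	struct example_priv *priv = data;

	writel(0, priv->base);			/* e.g. clear an enable bit */
}

static int example_enable(struct device *dev, struct example_priv *priv)
{
	writel(1, priv->base);			/* enable the block */

	/*
	 * If the action cannot be recorded, devm_add_action_or_reset()
	 * invokes it immediately, so the caller's error path needs no
	 * manual undo.  On success it runs automatically at unbind time,
	 * after later devres entries (such as a devm-registered watchdog)
	 * have been released.
	 */
	return devm_add_action_or_reset(dev, example_hw_disable, priv);
}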
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 177829b379da..7a90184eb950 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -142,13 +142,18 @@ static struct watchdog_device st_wdog_dev = {
.ops = &st_wdog_ops,
};
+static void st_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int st_wdog_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct st_wdog *st_wdog;
struct regmap *regmap;
- struct resource *res;
struct clk *clk;
void __iomem *base;
uint32_t mode;
@@ -156,7 +161,7 @@ static int st_wdog_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
- dev_err(&pdev->dev, "An LPC mode must be provided\n");
+ dev_err(dev, "An LPC mode must be provided\n");
return -EINVAL;
}
@@ -164,35 +169,34 @@ static int st_wdog_probe(struct platform_device *pdev)
if (mode != ST_LPC_MODE_WDT)
return -ENODEV;
- st_wdog = devm_kzalloc(&pdev->dev, sizeof(*st_wdog), GFP_KERNEL);
+ st_wdog = devm_kzalloc(dev, sizeof(*st_wdog), GFP_KERNEL);
if (!st_wdog)
return -ENOMEM;
- match = of_match_device(st_wdog_match, &pdev->dev);
+ match = of_match_device(st_wdog_match, dev);
if (!match) {
- dev_err(&pdev->dev, "Couldn't match device\n");
+ dev_err(dev, "Couldn't match device\n");
return -ENODEV;
}
st_wdog->syscfg = (struct st_wdog_syscfg *)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(regmap)) {
- dev_err(&pdev->dev, "No syscfg phandle specified\n");
+ dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(regmap);
}
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Unable to request clock\n");
+ dev_err(dev, "Unable to request clock\n");
return PTR_ERR(clk);
}
- st_wdog->dev = &pdev->dev;
+ st_wdog->dev = dev;
st_wdog->base = base;
st_wdog->clk = clk;
st_wdog->regmap = regmap;
@@ -200,39 +204,38 @@ static int st_wdog_probe(struct platform_device *pdev)
st_wdog->clkrate = clk_get_rate(st_wdog->clk);
if (!st_wdog->clkrate) {
- dev_err(&pdev->dev, "Unable to fetch clock rate\n");
+ dev_err(dev, "Unable to fetch clock rate\n");
return -EINVAL;
}
st_wdog_dev.max_timeout = 0xFFFFFFFF / st_wdog->clkrate;
- st_wdog_dev.parent = &pdev->dev;
+ st_wdog_dev.parent = dev;
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock\n");
+ dev_err(dev, "Unable to enable clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, st_clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
watchdog_set_drvdata(&st_wdog_dev, st_wdog);
watchdog_set_nowayout(&st_wdog_dev, WATCHDOG_NOWAYOUT);
/* Init Watchdog timeout with value in DT */
- ret = watchdog_init_timeout(&st_wdog_dev, 0, &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "Unable to initialise watchdog timeout\n");
- clk_disable_unprepare(clk);
+ ret = watchdog_init_timeout(&st_wdog_dev, 0, dev);
+ if (ret)
return ret;
- }
- ret = watchdog_register_device(&st_wdog_dev);
+ ret = devm_watchdog_register_device(dev, &st_wdog_dev);
if (ret) {
- dev_err(&pdev->dev, "Unable to register watchdog\n");
- clk_disable_unprepare(clk);
+ dev_err(dev, "Unable to register watchdog\n");
return ret;
}
st_wdog_setup(st_wdog, true);
- dev_info(&pdev->dev, "LPC Watchdog driver registered, reset type is %s",
+ dev_info(dev, "LPC Watchdog driver registered, reset type is %s",
st_wdog->warm_reset ? "warm" : "cold");
return ret;
@@ -243,8 +246,6 @@ static int st_wdog_remove(struct platform_device *pdev)
struct st_wdog *st_wdog = watchdog_get_drvdata(&st_wdog_dev);
st_wdog_setup(st_wdog, false);
- watchdog_unregister_device(&st_wdog_dev);
- clk_disable_unprepare(st_wdog->clk);
return 0;
}
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index e00e3b3526c6..d569a3634d9b 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -34,36 +34,44 @@
#define KR_KEY_EWA 0x5555 /* write access enable */
#define KR_KEY_DWA 0x0000 /* write access disable */
-/* IWDG_PR register bit values */
-#define PR_4 0x00 /* prescaler set to 4 */
-#define PR_8 0x01 /* prescaler set to 8 */
-#define PR_16 0x02 /* prescaler set to 16 */
-#define PR_32 0x03 /* prescaler set to 32 */
-#define PR_64 0x04 /* prescaler set to 64 */
-#define PR_128 0x05 /* prescaler set to 128 */
-#define PR_256 0x06 /* prescaler set to 256 */
+/* IWDG_PR register */
+#define PR_SHIFT 2
+#define PR_MIN BIT(PR_SHIFT)
/* IWDG_RLR register values */
-#define RLR_MIN 0x07C /* min value supported by reload register */
-#define RLR_MAX 0xFFF /* max value supported by reload register */
+#define RLR_MIN 0x2 /* min value recommended */
+#define RLR_MAX GENMASK(11, 0) /* max value of reload register */
/* IWDG_SR register bit mask */
-#define FLAG_PVU BIT(0) /* Watchdog prescaler value update */
-#define FLAG_RVU BIT(1) /* Watchdog counter reload value update */
+#define SR_PVU BIT(0) /* Watchdog prescaler value update */
+#define SR_RVU BIT(1) /* Watchdog counter reload value update */
/* set timeout to 100000 us */
#define TIMEOUT_US 100000
#define SLEEP_US 1000
-#define HAS_PCLK true
+struct stm32_iwdg_data {
+ bool has_pclk;
+ u32 max_prescaler;
+};
+
+static const struct stm32_iwdg_data stm32_iwdg_data = {
+ .has_pclk = false,
+ .max_prescaler = 256,
+};
+
+static const struct stm32_iwdg_data stm32mp1_iwdg_data = {
+ .has_pclk = true,
+ .max_prescaler = 1024,
+};
struct stm32_iwdg {
struct watchdog_device wdd;
+ const struct stm32_iwdg_data *data;
void __iomem *regs;
struct clk *clk_lsi;
struct clk *clk_pclk;
unsigned int rate;
- bool has_pclk;
};
static inline u32 reg_read(void __iomem *base, u32 reg)
@@ -79,31 +87,35 @@ static inline void reg_write(void __iomem *base, u32 reg, u32 val)
static int stm32_iwdg_start(struct watchdog_device *wdd)
{
struct stm32_iwdg *wdt = watchdog_get_drvdata(wdd);
- u32 val = FLAG_PVU | FLAG_RVU;
- u32 reload;
+ u32 tout, presc, iwdg_rlr, iwdg_pr, iwdg_sr;
int ret;
dev_dbg(wdd->parent, "%s\n", __func__);
- /* prescaler fixed to 256 */
- reload = clamp_t(unsigned int, ((wdd->timeout * wdt->rate) / 256) - 1,
- RLR_MIN, RLR_MAX);
+ tout = clamp_t(unsigned int, wdd->timeout,
+ wdd->min_timeout, wdd->max_hw_heartbeat_ms / 1000);
+
+ presc = DIV_ROUND_UP(tout * wdt->rate, RLR_MAX + 1);
+
+ /* The prescaler is aligned on a power of 2 and starts at 2 ^ PR_SHIFT. */
+ presc = roundup_pow_of_two(presc);
+ iwdg_pr = presc <= 1 << PR_SHIFT ? 0 : ilog2(presc) - PR_SHIFT;
+ iwdg_rlr = ((tout * wdt->rate) / presc) - 1;
/* enable write access */
reg_write(wdt->regs, IWDG_KR, KR_KEY_EWA);
/* set prescaler & reload registers */
- reg_write(wdt->regs, IWDG_PR, PR_256); /* prescaler fix to 256 */
- reg_write(wdt->regs, IWDG_RLR, reload);
+ reg_write(wdt->regs, IWDG_PR, iwdg_pr);
+ reg_write(wdt->regs, IWDG_RLR, iwdg_rlr);
reg_write(wdt->regs, IWDG_KR, KR_KEY_ENABLE);
/* wait for the registers to be updated (max 100ms) */
- ret = readl_relaxed_poll_timeout(wdt->regs + IWDG_SR, val,
- !(val & (FLAG_PVU | FLAG_RVU)),
+ ret = readl_relaxed_poll_timeout(wdt->regs + IWDG_SR, iwdg_sr,
+ !(iwdg_sr & (SR_PVU | SR_RVU)),
SLEEP_US, TIMEOUT_US);
if (ret) {
- dev_err(wdd->parent,
- "Fail to set prescaler or reload registers\n");
+ dev_err(wdd->parent, "Fail to set prescaler, reload regs\n");
return ret;
}
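The rewritten stm32_iwdg_start() derives the prescaler from the requested timeout instead of hard-coding 256: it picks the smallest power-of-two prescaler (at least 1 << PR_SHIFT, i.e. 4) that lets the reload value fit in the 12-bit RLR register. A worked example, assuming a nominal 32 kHz LSI clock (illustrative numbers, not taken from the patch):

	tout     = 10 s, rate = 32000 Hz
	presc    = DIV_ROUND_UP(10 * 32000, RLR_MAX + 1) = DIV_ROUND_UP(320000, 4096) = 79
	presc    = roundup_pow_of_two(79)                = 128
	iwdg_pr  = ilog2(128) - PR_SHIFT                 = 7 - 2 = 5    (PR encoding for /128)
	iwdg_rlr = (10 * 32000) / 128 - 1                = 2499         (fits in RLR_MAX = 0xFFF)

Choosing the smallest prescaler that still fits keeps the counter resolution as fine as possible for short timeouts, while the per-SoC max_prescaler added by this patch lets stm32mp1 advertise a longer max_hw_heartbeat_ms with its /1024 setting.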
@@ -138,38 +150,52 @@ static int stm32_iwdg_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static void stm32_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int stm32_iwdg_clk_init(struct platform_device *pdev,
struct stm32_iwdg *wdt)
{
+ struct device *dev = &pdev->dev;
u32 ret;
- wdt->clk_lsi = devm_clk_get(&pdev->dev, "lsi");
+ wdt->clk_lsi = devm_clk_get(dev, "lsi");
if (IS_ERR(wdt->clk_lsi)) {
- dev_err(&pdev->dev, "Unable to get lsi clock\n");
+ dev_err(dev, "Unable to get lsi clock\n");
return PTR_ERR(wdt->clk_lsi);
}
/* optional peripheral clock */
- if (wdt->has_pclk) {
- wdt->clk_pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (wdt->data->has_pclk) {
+ wdt->clk_pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(wdt->clk_pclk)) {
- dev_err(&pdev->dev, "Unable to get pclk clock\n");
+ dev_err(dev, "Unable to get pclk clock\n");
return PTR_ERR(wdt->clk_pclk);
}
ret = clk_prepare_enable(wdt->clk_pclk);
if (ret) {
- dev_err(&pdev->dev, "Unable to prepare pclk clock\n");
+ dev_err(dev, "Unable to prepare pclk clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev,
+ stm32_clk_disable_unprepare,
+ wdt->clk_pclk);
+ if (ret)
+ return ret;
}
ret = clk_prepare_enable(wdt->clk_lsi);
if (ret) {
- dev_err(&pdev->dev, "Unable to prepare lsi clock\n");
- clk_disable_unprepare(wdt->clk_pclk);
+ dev_err(dev, "Unable to prepare lsi clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, stm32_clk_disable_unprepare,
+ wdt->clk_lsi);
+ if (ret)
+ return ret;
wdt->rate = clk_get_rate(wdt->clk_lsi);
@@ -191,35 +217,31 @@ static const struct watchdog_ops stm32_iwdg_ops = {
};
static const struct of_device_id stm32_iwdg_of_match[] = {
- { .compatible = "st,stm32-iwdg", .data = (void *)!HAS_PCLK },
- { .compatible = "st,stm32mp1-iwdg", .data = (void *)HAS_PCLK },
+ { .compatible = "st,stm32-iwdg", .data = &stm32_iwdg_data },
+ { .compatible = "st,stm32mp1-iwdg", .data = &stm32mp1_iwdg_data },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, stm32_iwdg_of_match);
static int stm32_iwdg_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
- const struct of_device_id *match;
struct stm32_iwdg *wdt;
- struct resource *res;
int ret;
- match = of_match_device(stm32_iwdg_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->has_pclk = match->data;
+ wdt->data = of_device_get_match_data(&pdev->dev);
+ if (!wdt->data)
+ return -ENODEV;
/* This is the timer base. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->regs = devm_ioremap_resource(&pdev->dev, res);
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->regs)) {
- dev_err(&pdev->dev, "Could not get resource\n");
+ dev_err(dev, "Could not get resource\n");
return PTR_ERR(wdt->regs);
}
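The probe() hunk above also drops the open-coded of_match_device() lookup and the bool-cast-to-void-pointer trick in favour of a per-compatible data structure retrieved with of_device_get_match_data(). The shape of the pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_data {
	bool has_pclk;
	u32 max_prescaler;
};

static const struct example_data example_v1_data = {
	.max_prescaler = 256,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-v1", .data = &example_v1_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	const struct example_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)	/* no OF node or no matching entry */
		return -ENODEV;

	/* use data->has_pclk, data->max_prescaler ... */
	return 0;
}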
@@ -229,50 +251,30 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
/* Initialize struct watchdog_device. */
wdd = &wdt->wdd;
+ wdd->parent = dev;
wdd->info = &stm32_iwdg_info;
wdd->ops = &stm32_iwdg_ops;
- wdd->min_timeout = ((RLR_MIN + 1) * 256) / wdt->rate;
- wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * 256 * 1000) / wdt->rate;
- wdd->parent = &pdev->dev;
+ wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
+ wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
+ 1000) / wdt->rate;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+ watchdog_init_timeout(wdd, 0, dev);
- ret = watchdog_init_timeout(wdd, 0, &pdev->dev);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to set timeout value, using default\n");
-
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
- goto err;
+ dev_err(dev, "failed to register watchdog device\n");
+ return ret;
}
platform_set_drvdata(pdev, wdt);
return 0;
-err:
- clk_disable_unprepare(wdt->clk_lsi);
- clk_disable_unprepare(wdt->clk_pclk);
-
- return ret;
-}
-
-static int stm32_iwdg_remove(struct platform_device *pdev)
-{
- struct stm32_iwdg *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk_lsi);
- clk_disable_unprepare(wdt->clk_pclk);
-
- return 0;
}
static struct platform_driver stm32_iwdg_driver = {
.probe = stm32_iwdg_probe,
- .remove = stm32_iwdg_remove,
.driver = {
.name = "iwdg",
.of_match_table = of_match_ptr(stm32_iwdg_of_match),
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index 994c54cc68e9..671f4ba7b4ed 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -89,31 +89,31 @@ static struct notifier_block wdt_notifier = {
static int stmp3xxx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
+ watchdog_set_drvdata(&stmp3xxx_wdd, dev);
stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
- stmp3xxx_wdd.parent = &pdev->dev;
+ stmp3xxx_wdd.parent = dev;
- ret = watchdog_register_device(&stmp3xxx_wdd);
+ ret = devm_watchdog_register_device(dev, &stmp3xxx_wdd);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot register watchdog device\n");
+ dev_err(dev, "cannot register watchdog device\n");
return ret;
}
if (register_reboot_notifier(&wdt_notifier))
- dev_warn(&pdev->dev, "cannot register reboot notifier\n");
+ dev_warn(dev, "cannot register reboot notifier\n");
- dev_info(&pdev->dev, "initialized watchdog with heartbeat %ds\n",
- stmp3xxx_wdd.timeout);
+ dev_info(dev, "initialized watchdog with heartbeat %ds\n",
+ stmp3xxx_wdd.timeout);
return 0;
}
static int stmp3xxx_wdt_remove(struct platform_device *pdev)
{
unregister_reboot_notifier(&wdt_notifier);
- watchdog_unregister_device(&stmp3xxx_wdd);
return 0;
}
diff --git a/drivers/watchdog/stpmic1_wdt.c b/drivers/watchdog/stpmic1_wdt.c
index ad431d8ad95f..45d0c543466f 100644
--- a/drivers/watchdog/stpmic1_wdt.c
+++ b/drivers/watchdog/stpmic1_wdt.c
@@ -81,18 +81,19 @@ static const struct watchdog_ops pmic_watchdog_ops = {
static int pmic_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct stpmic1 *pmic;
struct stpmic1_wdt *wdt;
- if (!pdev->dev.parent)
+ if (!dev->parent)
return -EINVAL;
- pmic = dev_get_drvdata(pdev->dev.parent);
+ pmic = dev_get_drvdata(dev->parent);
if (!pmic)
return -EINVAL;
- wdt = devm_kzalloc(&pdev->dev, sizeof(struct stpmic1_wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(struct stpmic1_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -102,15 +103,15 @@ static int pmic_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.ops = &pmic_watchdog_ops;
wdt->wdtdev.min_timeout = PMIC_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = PMIC_WDT_MAX_TIMEOUT;
- wdt->wdtdev.parent = &pdev->dev;
+ wdt->wdtdev.parent = dev;
wdt->wdtdev.timeout = PMIC_WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(&wdt->wdtdev, 0, &pdev->dev);
+ watchdog_init_timeout(&wdt->wdtdev, 0, dev);
watchdog_set_nowayout(&wdt->wdtdev, nowayout);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
- ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev);
+ ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
if (ret)
return ret;
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index c6c73656997e..9c22f7753c6b 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -233,20 +233,19 @@ MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
static int sunxi_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct sunxi_wdt_dev *sunxi_wdt;
- struct resource *res;
int err;
- sunxi_wdt = devm_kzalloc(&pdev->dev, sizeof(*sunxi_wdt), GFP_KERNEL);
+ sunxi_wdt = devm_kzalloc(dev, sizeof(*sunxi_wdt), GFP_KERNEL);
if (!sunxi_wdt)
return -EINVAL;
- sunxi_wdt->wdt_regs = of_device_get_match_data(&pdev->dev);
+ sunxi_wdt->wdt_regs = of_device_get_match_data(dev);
if (!sunxi_wdt->wdt_regs)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ sunxi_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sunxi_wdt->wdt_base))
return PTR_ERR(sunxi_wdt->wdt_base);
@@ -255,9 +254,9 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
sunxi_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
sunxi_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
sunxi_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
- sunxi_wdt->wdt_dev.parent = &pdev->dev;
+ sunxi_wdt->wdt_dev.parent = dev;
- watchdog_init_timeout(&sunxi_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_init_timeout(&sunxi_wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&sunxi_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&sunxi_wdt->wdt_dev, 128);
@@ -266,12 +265,12 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
sunxi_wdt_stop(&sunxi_wdt->wdt_dev);
watchdog_stop_on_reboot(&sunxi_wdt->wdt_dev);
- err = devm_watchdog_register_device(&pdev->dev, &sunxi_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &sunxi_wdt->wdt_dev);
if (unlikely(err))
return err;
- dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
- sunxi_wdt->wdt_dev.timeout, nowayout);
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
+ sunxi_wdt->wdt_dev.timeout, nowayout);
return 0;
}
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index d0b53f3c0d17..1afb0e9d808c 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -108,10 +108,14 @@ static const struct watchdog_ops tangox_wdt_ops = {
.restart = tangox_wdt_restart,
};
+static void tangox_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int tangox_wdt_probe(struct platform_device *pdev)
{
struct tangox_wdt_device *dev;
- struct resource *res;
u32 config;
int err;
@@ -119,8 +123,7 @@ static int tangox_wdt_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(&pdev->dev, res);
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->base))
return PTR_ERR(dev->base);
@@ -131,12 +134,14 @@ static int tangox_wdt_probe(struct platform_device *pdev)
err = clk_prepare_enable(dev->clk);
if (err)
return err;
+ err = devm_add_action_or_reset(&pdev->dev,
+ tangox_clk_disable_unprepare, dev->clk);
+ if (err)
+ return err;
dev->clk_rate = clk_get_rate(dev->clk);
- if (!dev->clk_rate) {
- err = -EINVAL;
- goto err;
- }
+ if (!dev->clk_rate)
+ return -EINVAL;
dev->wdt.parent = &pdev->dev;
dev->wdt.info = &tangox_wdt_info;
@@ -170,31 +175,16 @@ static int tangox_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(&dev->wdt, 128);
- err = watchdog_register_device(&dev->wdt);
+ watchdog_stop_on_unregister(&dev->wdt);
+ err = devm_watchdog_register_device(&pdev->dev, &dev->wdt);
if (err)
- goto err;
+ return err;
platform_set_drvdata(pdev, dev);
dev_info(&pdev->dev, "SMP86xx/SMP87xx watchdog registered\n");
return 0;
-
- err:
- clk_disable_unprepare(dev->clk);
- return err;
-}
-
-static int tangox_wdt_remove(struct platform_device *pdev)
-{
- struct tangox_wdt_device *dev = platform_get_drvdata(pdev);
-
- tangox_wdt_stop(&dev->wdt);
- clk_disable_unprepare(dev->clk);
-
- watchdog_unregister_device(&dev->wdt);
-
- return 0;
}
static const struct of_device_id tangox_wdt_dt_ids[] = {
@@ -206,7 +196,6 @@ MODULE_DEVICE_TABLE(of, tangox_wdt_dt_ids);
static struct platform_driver tangox_wdt_driver = {
.probe = tangox_wdt_probe,
- .remove = tangox_wdt_remove,
.driver = {
.name = "tangox-wdt",
.of_match_table = tangox_wdt_dt_ids,
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 877dd39bd41f..a58b000acc4f 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -181,15 +181,14 @@ static const struct watchdog_ops tegra_wdt_ops = {
static int tegra_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct tegra_wdt *wdt;
- struct resource *res;
void __iomem *regs;
int ret;
/* This is the timer base. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -197,7 +196,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
* Allocate our watchdog driver data, which has the
* struct watchdog_device nested within it.
*/
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -212,39 +211,27 @@ static int tegra_wdt_probe(struct platform_device *pdev)
wdd->ops = &tegra_wdt_ops;
wdd->min_timeout = MIN_WDT_TIMEOUT;
wdd->max_timeout = MAX_WDT_TIMEOUT;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
- ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ watchdog_stop_on_unregister(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev,
- "failed to register watchdog device\n");
+ dev_err(dev, "failed to register watchdog device\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev,
- "initialized (heartbeat = %d sec, nowayout = %d)\n",
+ dev_info(dev, "initialized (heartbeat = %d sec, nowayout = %d)\n",
heartbeat, nowayout);
return 0;
}
-static int tegra_wdt_remove(struct platform_device *pdev)
-{
- struct tegra_wdt *wdt = platform_get_drvdata(pdev);
-
- tegra_wdt_stop(&wdt->wdd);
-
- dev_info(&pdev->dev, "removed wdt\n");
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int tegra_wdt_runtime_suspend(struct device *dev)
{
@@ -280,7 +267,6 @@ static const struct dev_pm_ops tegra_wdt_pm_ops = {
static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
- .remove = tegra_wdt_remove,
.driver = {
.name = "tegra-wdt",
.pm = &tegra_wdt_pm_ops,
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 52941207a12a..72d0b0adde38 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -70,11 +70,12 @@ static struct watchdog_ops tqmx86_wdt_ops = {
static int tqmx86_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct tqmx86_wdt *priv;
struct resource *res;
int err;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -82,14 +83,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- priv->io_base = devm_ioport_map(&pdev->dev, res->start,
- resource_size(res));
+ priv->io_base = devm_ioport_map(dev, res->start, resource_size(res));
if (!priv->io_base)
return -ENOMEM;
watchdog_set_drvdata(&priv->wdd, priv);
- priv->wdd.parent = &pdev->dev;
+ priv->wdd.parent = dev;
priv->wdd.info = &tqmx86_wdt_info;
priv->wdd.ops = &tqmx86_wdt_ops;
priv->wdd.min_timeout = 1;
@@ -97,16 +97,16 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
priv->wdd.max_hw_heartbeat_ms = 4096*1000;
priv->wdd.timeout = WDT_TIMEOUT;
- watchdog_init_timeout(&priv->wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(&priv->wdd, timeout, dev);
watchdog_set_nowayout(&priv->wdd, WATCHDOG_NOWAYOUT);
tqmx86_wdt_set_timeout(&priv->wdd, priv->wdd.timeout);
- err = devm_watchdog_register_device(&pdev->dev, &priv->wdd);
+ err = devm_watchdog_register_device(dev, &priv->wdd);
if (err)
return err;
- dev_info(&pdev->dev, "TQMx86 watchdog\n");
+ dev_info(dev, "TQMx86 watchdog\n");
return 0;
}
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index 89843b16b04a..9dc6d7f45806 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -108,7 +108,8 @@ static const struct watchdog_info ts4800_wdt_info = {
static int ts4800_wdt_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct device_node *syscon_np;
struct watchdog_device *wdd;
struct ts4800_wdt *wdt;
@@ -117,18 +118,18 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
syscon_np = of_parse_phandle(np, "syscon", 0);
if (!syscon_np) {
- dev_err(&pdev->dev, "no syscon property\n");
+ dev_err(dev, "no syscon property\n");
return -ENODEV;
}
ret = of_property_read_u32_index(np, "syscon", 1, &reg);
if (ret < 0) {
- dev_err(&pdev->dev, "no offset in syscon\n");
+ dev_err(dev, "no offset in syscon\n");
return ret;
}
/* allocate memory for watchdog struct */
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -137,13 +138,13 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
wdt->regmap = syscon_node_to_regmap(syscon_np);
of_node_put(syscon_np);
if (IS_ERR(wdt->regmap)) {
- dev_err(&pdev->dev, "cannot get parent's regmap\n");
+ dev_err(dev, "cannot get parent's regmap\n");
return PTR_ERR(wdt->regmap);
}
/* Initialize struct watchdog_device */
wdd = &wdt->wdd;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
wdd->info = &ts4800_wdt_info;
wdd->ops = &ts4800_wdt_ops;
wdd->min_timeout = ts4800_wdt_map[0].timeout;
@@ -151,7 +152,7 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
- watchdog_init_timeout(wdd, 0, &pdev->dev);
+ watchdog_init_timeout(wdd, 0, dev);
/*
* As this watchdog supports only a few values, ts4800_wdt_set_timeout
@@ -169,31 +170,20 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
*/
ts4800_wdt_stop(wdd);
- ret = watchdog_register_device(wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret) {
- dev_err(&pdev->dev,
- "failed to register watchdog device\n");
+ dev_err(dev, "failed to register watchdog device\n");
return ret;
}
platform_set_drvdata(pdev, wdt);
- dev_info(&pdev->dev,
- "initialized (timeout = %d sec, nowayout = %d)\n",
+ dev_info(dev, "initialized (timeout = %d sec, nowayout = %d)\n",
wdd->timeout, nowayout);
return 0;
}
-static int ts4800_wdt_remove(struct platform_device *pdev)
-{
- struct ts4800_wdt *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdd);
-
- return 0;
-}
-
static const struct of_device_id ts4800_wdt_of_match[] = {
{ .compatible = "technologic,ts4800-wdt", },
{ },
@@ -202,7 +192,6 @@ MODULE_DEVICE_TABLE(of, ts4800_wdt_of_match);
static struct platform_driver ts4800_wdt_driver = {
.probe = ts4800_wdt_probe,
- .remove = ts4800_wdt_remove,
.driver = {
.name = "ts4800_wdt",
.of_match_table = ts4800_wdt_of_match,
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 811e43c39ec4..bf918f5fa131 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -122,22 +122,20 @@ static const struct watchdog_ops ts72xx_wdt_ops = {
static int ts72xx_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ts72xx_wdt_priv *priv;
struct watchdog_device *wdd;
- struct resource *res;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->control_reg = devm_ioremap_resource(&pdev->dev, res);
+ priv->control_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->control_reg))
return PTR_ERR(priv->control_reg);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->feed_reg = devm_ioremap_resource(&pdev->dev, res);
+ priv->feed_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->feed_reg))
return PTR_ERR(priv->feed_reg);
@@ -146,20 +144,20 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
wdd->ops = &ts72xx_wdt_ops;
wdd->min_timeout = 1;
wdd->max_hw_heartbeat_ms = 8000;
- wdd->parent = &pdev->dev;
+ wdd->parent = dev;
watchdog_set_nowayout(wdd, nowayout);
wdd->timeout = TS72XX_WDT_DEFAULT_TIMEOUT;
- watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ watchdog_init_timeout(wdd, timeout, dev);
watchdog_set_drvdata(wdd, priv);
- ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
- dev_info(&pdev->dev, "TS-72xx Watchdog driver\n");
+ dev_info(dev, "TS-72xx Watchdog driver\n");
return 0;
}
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 569fe85e52da..74c5737cd934 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -70,10 +70,10 @@ static const struct watchdog_ops twl4030_wdt_ops = {
static int twl4030_wdt_probe(struct platform_device *pdev)
{
- int ret = 0;
+ struct device *dev = &pdev->dev;
struct watchdog_device *wdt;
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -83,27 +83,14 @@ static int twl4030_wdt_probe(struct platform_device *pdev)
wdt->timeout = 30;
wdt->min_timeout = 1;
wdt->max_timeout = 30;
- wdt->parent = &pdev->dev;
+ wdt->parent = dev;
watchdog_set_nowayout(wdt, nowayout);
platform_set_drvdata(pdev, wdt);
twl4030_wdt_stop(wdt);
- ret = watchdog_register_device(wdt);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int twl4030_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(wdt);
-
- return 0;
+ return devm_watchdog_register_device(dev, wdt);
}
#ifdef CONFIG_PM
@@ -137,7 +124,6 @@ MODULE_DEVICE_TABLE(of, twl_wdt_of_match);
static struct platform_driver twl4030_wdt_driver = {
.probe = twl4030_wdt_probe,
- .remove = twl4030_wdt_remove,
.suspend = twl4030_wdt_suspend,
.resume = twl4030_wdt_resume,
.driver = {
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 6f7a9deb27d0..fcb4da5b1f4c 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -103,7 +103,6 @@ static struct watchdog_device txx9wdt = {
static int __init txx9wdt_probe(struct platform_device *dev)
{
- struct resource *res;
int ret;
txx9_imclk = clk_get(NULL, "imbus_clk");
@@ -119,8 +118,7 @@ static int __init txx9wdt_probe(struct platform_device *dev)
goto exit;
}
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- txx9wdt_reg = devm_ioremap_resource(&dev->dev, res);
+ txx9wdt_reg = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(txx9wdt_reg)) {
ret = PTR_ERR(txx9wdt_reg);
goto exit;
diff --git a/drivers/watchdog/uniphier_wdt.c b/drivers/watchdog/uniphier_wdt.c
index e20a7a459d69..8e9242c23022 100644
--- a/drivers/watchdog/uniphier_wdt.c
+++ b/drivers/watchdog/uniphier_wdt.c
@@ -191,8 +191,6 @@ static int uniphier_wdt_probe(struct platform_device *pdev)
if (!wdev)
return -ENOMEM;
- platform_set_drvdata(pdev, wdev);
-
parent = of_get_parent(dev->of_node); /* parent should be syscon node */
regmap = syscon_node_to_regmap(parent);
of_node_put(parent);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index 37c084353cce..9fa7f95f7554 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -86,8 +86,9 @@ static struct watchdog_device ux500_wdt = {
static int ux500_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- struct ux500_wdt_data *pdata = dev_get_platdata(&pdev->dev);
+ struct ux500_wdt_data *pdata = dev_get_platdata(dev);
if (pdata) {
if (pdata->timeout > 0)
@@ -96,7 +97,7 @@ static int ux500_wdt_probe(struct platform_device *pdev)
ux500_wdt.max_timeout = WATCHDOG_MAX28;
}
- ux500_wdt.parent = &pdev->dev;
+ ux500_wdt.parent = dev;
watchdog_set_nowayout(&ux500_wdt, nowayout);
/* disable auto off on sleep */
@@ -105,18 +106,11 @@ static int ux500_wdt_probe(struct platform_device *pdev)
/* set HW initial value */
prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
- ret = watchdog_register_device(&ux500_wdt);
+ ret = devm_watchdog_register_device(dev, &ux500_wdt);
if (ret)
return ret;
- dev_info(&pdev->dev, "initialized\n");
-
- return 0;
-}
-
-static int ux500_wdt_remove(struct platform_device *dev)
-{
- watchdog_unregister_device(&ux500_wdt);
+ dev_info(dev, "initialized\n");
return 0;
}
@@ -153,7 +147,6 @@ static int ux500_wdt_resume(struct platform_device *pdev)
static struct platform_driver ux500_wdt_driver = {
.probe = ux500_wdt_probe,
- .remove = ux500_wdt_remove,
.suspend = ux500_wdt_suspend,
.resume = ux500_wdt_resume,
.driver = {
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index eb8fa25f8eb2..62be9e52a4de 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -105,34 +105,48 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
* timeout module parameter (if it is valid value) or the timeout-sec property
* (only if it is a valid value and the timeout_parm is out of bounds).
* If none of them are valid then we keep the old value (which should normally
- * be the default timeout value).
- * be the default timeout value).
+ * be the default timeout value). Note that for the module parameter, '0' means
+ * 'use default', while '0' is an invalid value for the timeout-sec property;
+ * simply omit the property if you want to use the default value.
*
- * A zero is returned on success and -EINVAL for failure.
+ * A zero is returned on success or -EINVAL if all provided values are out of
+ * bounds.
*/
int watchdog_init_timeout(struct watchdog_device *wdd,
unsigned int timeout_parm, struct device *dev)
{
+ const char *dev_str = wdd->parent ? dev_name(wdd->parent) :
+ (const char *)wdd->info->identity;
unsigned int t = 0;
int ret = 0;
watchdog_check_min_max_timeout(wdd);
- /* try to get the timeout module parameter first */
- if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
- wdd->timeout = timeout_parm;
- return ret;
- }
- if (timeout_parm)
+ /* check the driver supplied value (likely a module parameter) first */
+ if (timeout_parm) {
+ if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+ wdd->timeout = timeout_parm;
+ return 0;
+ }
+ pr_err("%s: driver supplied timeout (%u) out of range\n",
+ dev_str, timeout_parm);
ret = -EINVAL;
+ }
/* try to get the timeout_sec property */
- if (dev == NULL || dev->of_node == NULL)
- return ret;
- of_property_read_u32(dev->of_node, "timeout-sec", &t);
- if (!watchdog_timeout_invalid(wdd, t) && t)
- wdd->timeout = t;
- else
+ if (dev && dev->of_node &&
+ of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
+ if (t && !watchdog_timeout_invalid(wdd, t)) {
+ wdd->timeout = t;
+ return 0;
+ }
+ pr_err("%s: DT supplied timeout (%u) out of range\n", dev_str, t);
ret = -EINVAL;
+ }
+
+ if (ret < 0 && wdd->timeout)
+ pr_warn("%s: falling back to default timeout (%u)\n", dev_str,
+ wdd->timeout);
return ret;
}
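This watchdog_core.c rework is what makes the earlier per-driver cleanups (sp5100_tco, stm32_iwdg, xen_wdt, ziirave_wdt) safe: the core now logs out-of-range module-parameter and timeout-sec values itself and falls back to whatever default the driver preset, so callers no longer need their own warning on -EINVAL. Typical caller, sketched with a hypothetical default:

#include <linux/watchdog.h>

#define EXAMPLE_DEFAULT_TIMEOUT	30	/* seconds */

static void example_setup_timeout(struct watchdog_device *wdd,
				  unsigned int timeout_param,
				  struct device *dev)
{
	wdd->timeout = EXAMPLE_DEFAULT_TIMEOUT;		/* fallback value */

	/*
	 * Precedence: a valid (non-zero) module parameter wins, then a
	 * valid "timeout-sec" property on dev->of_node, else the default
	 * set above.  Out-of-range values are reported by the core.
	 */
	watchdog_init_timeout(wdd, timeout_param, dev);
}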
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 56ad19608a9b..430ee4e9b185 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -287,7 +287,7 @@ static unsigned int wdat_wdt_get_timeleft(struct watchdog_device *wdd)
struct wdat_wdt *wdat = to_wdat_wdt(wdd);
u32 periods = 0;
- wdat_wdt_run_action(wdat, ACPI_WDAT_GET_COUNTDOWN, 0, &periods);
+ wdat_wdt_run_action(wdat, ACPI_WDAT_GET_CURRENT_COUNTDOWN, 0, &periods);
return periods * wdat->period / 1000;
}
@@ -308,6 +308,7 @@ static const struct watchdog_ops wdat_wdt_ops = {
static int wdat_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct acpi_wdat_entry *entries;
const struct acpi_table_wdat *tbl;
struct wdat_wdt *wdat;
@@ -321,11 +322,11 @@ static int wdat_wdt_probe(struct platform_device *pdev)
if (ACPI_FAILURE(status))
return -ENODEV;
- wdat = devm_kzalloc(&pdev->dev, sizeof(*wdat), GFP_KERNEL);
+ wdat = devm_kzalloc(dev, sizeof(*wdat), GFP_KERNEL);
if (!wdat)
return -ENOMEM;
- regs = devm_kcalloc(&pdev->dev, pdev->num_resources, sizeof(*regs),
+ regs = devm_kcalloc(dev, pdev->num_resources, sizeof(*regs),
GFP_KERNEL);
if (!regs)
return -ENOMEM;
@@ -350,15 +351,15 @@ static int wdat_wdt_probe(struct platform_device *pdev)
res = &pdev->resource[i];
if (resource_type(res) == IORESOURCE_MEM) {
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_ioremap_resource(dev, res);
if (IS_ERR(reg))
return PTR_ERR(reg);
} else if (resource_type(res) == IORESOURCE_IO) {
- reg = devm_ioport_map(&pdev->dev, res->start, 1);
+ reg = devm_ioport_map(dev, res->start, 1);
if (!reg)
return -ENOMEM;
} else {
- dev_err(&pdev->dev, "Unsupported resource\n");
+ dev_err(dev, "Unsupported resource\n");
return -EINVAL;
}
@@ -376,12 +377,11 @@ static int wdat_wdt_probe(struct platform_device *pdev)
action = entries[i].action;
if (action >= MAX_WDAT_ACTIONS) {
- dev_dbg(&pdev->dev, "Skipping unknown action: %u\n",
- action);
+ dev_dbg(dev, "Skipping unknown action: %u\n", action);
continue;
}
- instr = devm_kzalloc(&pdev->dev, sizeof(*instr), GFP_KERNEL);
+ instr = devm_kzalloc(dev, sizeof(*instr), GFP_KERNEL);
if (!instr)
return -ENOMEM;
@@ -398,7 +398,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
r.flags = IORESOURCE_IO;
} else {
- dev_dbg(&pdev->dev, "Unsupported address space: %d\n",
+ dev_dbg(dev, "Unsupported address space: %d\n",
gas->space_id);
continue;
}
@@ -413,14 +413,15 @@ static int wdat_wdt_probe(struct platform_device *pdev)
}
if (!instr->reg) {
- dev_err(&pdev->dev, "I/O resource not found\n");
+ dev_err(dev, "I/O resource not found\n");
return -EINVAL;
}
instructions = wdat->instructions[action];
if (!instructions) {
- instructions = devm_kzalloc(&pdev->dev,
- sizeof(*instructions), GFP_KERNEL);
+ instructions = devm_kzalloc(dev,
+ sizeof(*instructions),
+ GFP_KERNEL);
if (!instructions)
return -ENOMEM;
@@ -441,7 +442,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdat);
watchdog_set_nowayout(&wdat->wdd, nowayout);
- return devm_watchdog_register_device(&pdev->dev, &wdat->wdd);
+ return devm_watchdog_register_device(dev, &wdat->wdd);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 116c2f47b463..9b6565a3fab4 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -180,8 +180,9 @@ static const struct watchdog_ops wm831x_wdt_ops = {
static int wm831x_wdt_probe(struct platform_device *pdev)
{
- struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *chip_pdata = dev_get_platdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct wm831x *wm831x = dev_get_drvdata(dev->parent);
+ struct wm831x_pdata *chip_pdata = dev_get_platdata(dev->parent);
struct wm831x_watchdog_pdata *pdata;
struct wm831x_wdt_drvdata *driver_data;
struct watchdog_device *wm831x_wdt;
@@ -198,8 +199,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
- driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
- GFP_KERNEL);
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
@@ -210,7 +210,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
- wm831x_wdt->parent = &pdev->dev;
+ wm831x_wdt->parent = dev;
watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
@@ -240,10 +240,9 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
if (pdata->update_gpio) {
- ret = devm_gpio_request_one(&pdev->dev,
- pdata->update_gpio,
- GPIOF_OUT_INIT_LOW,
- "Watchdog update");
+ ret = devm_gpio_request_one(dev, pdata->update_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "Watchdog update");
if (ret < 0) {
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
@@ -268,7 +267,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
}
}
- ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt);
+ ret = devm_watchdog_register_device(dev, &driver_data->wdt);
if (ret != 0) {
dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n",
ret);
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
index f1c016d015b3..25a1af5e1787 100644
--- a/drivers/watchdog/xen_wdt.c
+++ b/drivers/watchdog/xen_wdt.c
@@ -122,35 +122,33 @@ static struct watchdog_device xen_wdt_dev = {
static int xen_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct sched_watchdog wd = { .id = ~0 };
int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
if (ret == -ENOSYS) {
- dev_err(&pdev->dev, "watchdog not supported by hypervisor\n");
+ dev_err(dev, "watchdog not supported by hypervisor\n");
return -ENODEV;
}
if (ret != -EINVAL) {
- dev_err(&pdev->dev, "unexpected hypervisor error (%d)\n", ret);
+ dev_err(dev, "unexpected hypervisor error (%d)\n", ret);
return -ENODEV;
}
- if (watchdog_init_timeout(&xen_wdt_dev, timeout, NULL))
- dev_info(&pdev->dev, "timeout value invalid, using %d\n",
- xen_wdt_dev.timeout);
+ watchdog_init_timeout(&xen_wdt_dev, timeout, NULL);
watchdog_set_nowayout(&xen_wdt_dev, nowayout);
watchdog_stop_on_reboot(&xen_wdt_dev);
watchdog_stop_on_unregister(&xen_wdt_dev);
- ret = devm_watchdog_register_device(&pdev->dev, &xen_wdt_dev);
+ ret = devm_watchdog_register_device(dev, &xen_wdt_dev);
if (ret) {
- dev_err(&pdev->dev, "cannot register watchdog device (%d)\n",
- ret);
+ dev_err(dev, "cannot register watchdog device (%d)\n", ret);
return ret;
}
- dev_info(&pdev->dev, "initialized (timeout=%ds, nowayout=%d)\n",
- xen_wdt_dev.timeout, nowayout);
+ dev_info(dev, "initialized (timeout=%ds, nowayout=%d)\n",
+ xen_wdt_dev.timeout, nowayout);
return 0;
}
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index d3594aa3a374..43e6b575c32c 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -658,11 +658,7 @@ static int ziirave_wdt_probe(struct i2c_client *client,
w_priv->wdd.parent = &client->dev;
w_priv->wdd.groups = ziirave_wdt_groups;
- ret = watchdog_init_timeout(&w_priv->wdd, wdt_timeout, &client->dev);
- if (ret) {
- dev_info(&client->dev,
- "Unable to select timeout value, using default\n");
- }
+ watchdog_init_timeout(&w_priv->wdd, wdt_timeout, &client->dev);
/*
* The default value set in the watchdog should be perfectly valid, so
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c
index 9261f7c77f6d..c8549bf07cc9 100644
--- a/drivers/watchdog/zx2967_wdt.c
+++ b/drivers/watchdog/zx2967_wdt.c
@@ -188,11 +188,15 @@ static void zx2967_wdt_reset_sysctrl(struct device *dev)
of_node_put(out_args.np);
}
+static void zx2967_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int zx2967_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zx2967_wdt *wdt;
- struct resource *base;
int ret;
struct reset_control *rstc;
@@ -207,10 +211,9 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
wdt->wdt_device.timeout = ZX2967_WDT_DEFAULT_TIMEOUT;
wdt->wdt_device.max_timeout = ZX2967_WDT_MAX_TIMEOUT;
wdt->wdt_device.min_timeout = ZX2967_WDT_MIN_TIMEOUT;
- wdt->wdt_device.parent = &pdev->dev;
+ wdt->wdt_device.parent = dev;
- base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->reg_base = devm_ioremap_resource(dev, base);
+ wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg_base))
return PTR_ERR(wdt->reg_base);
@@ -227,13 +230,16 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
dev_err(dev, "failed to enable clock\n");
return ret;
}
+ ret = devm_add_action_or_reset(dev, zx2967_clk_disable_unprepare,
+ wdt->clock);
+ if (ret)
+ return ret;
clk_set_rate(wdt->clock, ZX2967_WDT_CLK_FREQ);
rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rstc)) {
dev_err(dev, "failed to get rstc");
- ret = PTR_ERR(rstc);
- goto err;
+ return PTR_ERR(rstc);
}
reset_control_assert(rstc);
@@ -244,28 +250,14 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
ZX2967_WDT_DEFAULT_TIMEOUT, dev);
watchdog_set_nowayout(&wdt->wdt_device, WATCHDOG_NOWAYOUT);
- ret = watchdog_register_device(&wdt->wdt_device);
+ ret = devm_watchdog_register_device(dev, &wdt->wdt_device);
if (ret)
- goto err;
+ return ret;
dev_info(dev, "watchdog enabled (timeout=%d sec, nowayout=%d)",
wdt->wdt_device.timeout, WATCHDOG_NOWAYOUT);
return 0;
-
-err:
- clk_disable_unprepare(wdt->clock);
- return ret;
-}
-
-static int zx2967_wdt_remove(struct platform_device *pdev)
-{
- struct zx2967_wdt *wdt = platform_get_drvdata(pdev);
-
- watchdog_unregister_device(&wdt->wdt_device);
- clk_disable_unprepare(wdt->clock);
-
- return 0;
}
static const struct of_device_id zx2967_wdt_match[] = {
@@ -276,7 +268,6 @@ MODULE_DEVICE_TABLE(of, zx2967_wdt_match);
static struct platform_driver zx2967_wdt_driver = {
.probe = zx2967_wdt_probe,
- .remove = zx2967_wdt_remove,
.driver = {
.name = "zx2967-wdt",
.of_match_table = of_match_ptr(zx2967_wdt_match),
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 7cf9c51318aa..469dfbd6cf90 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
struct gntdev_grant_map *map;
int ret = 0;
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
mutex_lock(&priv->lock);
else if (!mutex_trylock(&priv->lock))
return -EAGAIN;
list_for_each_entry(map, &priv->maps, next) {
ret = unmap_if_in_range(map, range->start, range->end,
- range->blockable);
+ mmu_notifier_range_blockable(range));
if (ret)
goto out_unlock;
}
list_for_each_entry(map, &priv->freeable_maps, next) {
ret = unmap_if_in_range(map, range->start, range->end,
- range->blockable);
+ mmu_notifier_range_blockable(range));
if (ret)
goto out_unlock;
}
@@ -852,7 +852,7 @@ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
unsigned long xen_pfn;
int ret;
- ret = get_user_pages_fast(addr, 1, writeable, &page);
+ ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
if (ret < 0)
return ret;
@@ -1084,7 +1084,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = vma_pages(vma);
struct gntdev_grant_map *map;
- int i, err = -EINVAL;
+ int err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -1145,12 +1145,9 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
if (!use_ptemod) {
- for (i = 0; i < count; i++) {
- err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
- map->pages[i]);
- if (err)
- goto out_put_map;
- }
+ err = vm_map_pages(vma, map->pages, map->count);
+ if (err)
+ goto out_put_map;
} else {
#ifdef CONFIG_X86
/*
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index a1c61e351d3f..dd5bbb6e1b6b 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -165,12 +165,8 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_priv->n_pages != count)
ret = -ENOMEM;
else
- for (i = 0; i < vma_priv->n_pages; i++) {
- ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
- vma_priv->pages[i]);
- if (ret)
- break;
- }
+ ret = vm_map_pages_zero(vma, vma_priv->pages,
+ vma_priv->n_pages);
if (ret)
privcmd_buf_vmapriv_free(vma_priv);
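Both gntdev and privcmd-buf above replace open-coded vm_insert_page() loops with the vm_map_pages()/vm_map_pages_zero() helpers, which insert a whole page array and reject VMAs that do not fit it; the gntdev hunk also moves get_user_pages_fast() to the gup_flags convention (FOLL_WRITE instead of a write boolean). A minimal mmap handler using the same helper, with a hypothetical private structure:

#include <linux/fs.h>
#include <linux/mm.h>

struct example_buf {
	struct page **pages;		/* allocated and filled elsewhere */
	unsigned long n_pages;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_buf *buf = file->private_data;

	/*
	 * Maps buf->pages into the VMA, honouring vma->vm_pgoff as an
	 * offset into the array; returns -ENXIO if the VMA is larger
	 * than the remaining pages.  vm_map_pages_zero() is the variant
	 * that ignores vm_pgoff.
	 */
	return vm_map_pages(vma, buf->pages, buf->n_pages);
}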
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 23f7f6ec7d1f..833b2d2c4318 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -697,7 +697,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
/* We need to force a call to our callback here in case
* xend already configured us!
*/
- xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);
+ xen_pcibk_be_watch(&pdev->be_watch, NULL, NULL);
out:
return err;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0782ff3c2273..faf452d0edf0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -465,7 +465,6 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
struct watch_adapter *watch;
char *path, *token;
int err, rc;
- LIST_HEAD(staging_q);
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
@@ -523,7 +522,6 @@ static ssize_t xenbus_file_write(struct file *filp,
uint32_t msg_type;
int rc = len;
int ret;
- LIST_HEAD(staging_q);
/*
* We're expecting usermode to be writing properly formed