Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Kconfig | 8
-rw-r--r-- drivers/acpi/bus.c | 2
-rw-r--r-- drivers/acpi/button.c | 13
-rw-r--r-- drivers/acpi/ec.c | 36
-rw-r--r-- drivers/acpi/osl.c | 28
-rw-r--r-- drivers/acpi/sleep.c | 26
-rw-r--r-- drivers/acpi/sysfs.c | 6
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 1
-rw-r--r-- drivers/ata/pata_atp867x.c | 2
-rw-r--r-- drivers/ata/sata_nv.c | 2
-rw-r--r-- drivers/auxdisplay/charlcd.c | 34
-rw-r--r-- drivers/base/Kconfig | 4
-rw-r--r-- drivers/base/node.c | 9
-rw-r--r-- drivers/base/power/Makefile | 1
-rw-r--r-- drivers/base/power/qos-test.c | 117
-rw-r--r-- drivers/base/power/qos.c | 73
-rw-r--r-- drivers/base/power/wakeup.c | 54
-rw-r--r-- drivers/block/brd.c | 5
-rw-r--r-- drivers/block/null_blk_main.c | 40
-rw-r--r-- drivers/block/rbd.c | 467
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 10
-rw-r--r-- drivers/block/xen-blkback/common.h | 3
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 11
-rw-r--r-- drivers/bus/Kconfig | 5
-rw-r--r-- drivers/bus/hisi_lpc.c | 9
-rw-r--r-- drivers/bus/ti-sysc.c | 108
-rw-r--r-- drivers/char/agp/frontend.c | 3
-rw-r--r-- drivers/char/agp/generic.c | 12
-rw-r--r-- drivers/clk/Kconfig | 5
-rw-r--r-- drivers/clk/mmp/Makefile | 2
-rw-r--r-- drivers/clk/qcom/clk-rpmh.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-sdm845.c | 2
-rw-r--r-- drivers/clocksource/Kconfig | 1
-rw-r--r-- drivers/clocksource/asm9260_timer.c | 4
-rw-r--r-- drivers/clocksource/renesas-ostm.c | 189
-rw-r--r-- drivers/clocksource/timer-of.c | 6
-rw-r--r-- drivers/cpufreq/Kconfig.powerpc | 8
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 16
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_performance.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_powersave.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_userspace.c | 2
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 2
-rw-r--r-- drivers/cpufreq/tegra124-cpufreq.c | 59
-rw-r--r-- drivers/cpuidle/Kconfig | 16
-rw-r--r-- drivers/cpuidle/Kconfig.arm | 22
-rw-r--r-- drivers/cpuidle/cpuidle.c | 2
-rw-r--r-- drivers/cpuidle/poll_state.c | 1
-rw-r--r-- drivers/crypto/Kconfig | 1
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 4
-rw-r--r-- drivers/devfreq/devfreq.c | 4
-rw-r--r-- drivers/dma/Kconfig | 88
-rw-r--r-- drivers/dma/Makefile | 4
-rw-r--r-- drivers/dma/at_xdmac.c | 7
-rw-r--r-- drivers/dma/dma-jz4780.c | 16
-rw-r--r-- drivers/dma/dw/platform.c | 2
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/Kconfig | 9
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/Makefile | 3
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 825
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h | 153
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/dpdmai.c | 376
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/dpdmai.h | 177
-rw-r--r-- drivers/dma/fsl-qdma.c | 3
-rw-r--r-- drivers/dma/iop-adma.c | 10
-rw-r--r-- drivers/dma/k3dma.c | 7
-rw-r--r-- drivers/dma/mediatek/mtk-cqdma.c | 10
-rw-r--r-- drivers/dma/mediatek/mtk-hsdma.c | 4
-rw-r--r-- drivers/dma/mediatek/mtk-uart-apdma.c | 9
-rw-r--r-- drivers/dma/milbeaut-hdmac.c | 578
-rw-r--r-- drivers/dma/milbeaut-xdmac.c | 415
-rw-r--r-- drivers/dma/mmp_pdma.c | 2
-rw-r--r-- drivers/dma/mmp_tdma.c | 3
-rw-r--r-- drivers/dma/owl-dma.c | 7
-rw-r--r-- drivers/dma/sf-pdma/Kconfig | 6
-rw-r--r-- drivers/dma/sf-pdma/Makefile | 1
-rw-r--r-- drivers/dma/sf-pdma/sf-pdma.c | 620
-rw-r--r-- drivers/dma/sf-pdma/sf-pdma.h | 124
-rw-r--r-- drivers/dma/sh/rcar-dmac.c | 47
-rw-r--r-- drivers/dma/sprd-dma.c | 17
-rw-r--r-- drivers/dma/ti/edma.c | 77
-rw-r--r-- drivers/dma/uniphier-mdmac.c | 4
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 649
-rw-r--r-- drivers/dma/zx_dma.c | 8
-rw-r--r-- drivers/firewire/core-cdev.c | 3
-rw-r--r-- drivers/firewire/core-iso.c | 7
-rw-r--r-- drivers/firewire/core.h | 2
-rw-r--r-- drivers/firewire/ohci.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 8
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 2
-rw-r--r-- drivers/firmware/dmi_scan.c | 41
-rw-r--r-- drivers/firmware/imx/imx-dsp.c | 2
-rw-r--r-- drivers/firmware/imx/imx-scu-irq.c | 1
-rw-r--r-- drivers/firmware/imx/imx-scu.c | 24
-rw-r--r-- drivers/firmware/meson/meson_sm.c | 110
-rw-r--r-- drivers/firmware/qcom_scm-32.c | 57
-rw-r--r-- drivers/firmware/qcom_scm-64.c | 165
-rw-r--r-- drivers/firmware/qcom_scm.c | 59
-rw-r--r-- drivers/firmware/qcom_scm.h | 14
-rw-r--r-- drivers/firmware/tegra/bpmp.c | 2
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 8
-rw-r--r-- drivers/gpio/gpio-104-dio-48e.c | 73
-rw-r--r-- drivers/gpio/gpio-104-idi-48.c | 36
-rw-r--r-- drivers/gpio/gpio-74x164.c | 19
-rw-r--r-- drivers/gpio/gpio-gpio-mm.c | 73
-rw-r--r-- drivers/gpio/gpio-max3191x.c | 19
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 195
-rw-r--r-- drivers/gpio/gpio-pci-idio-16.c | 75
-rw-r--r-- drivers/gpio/gpio-pcie-idio-24.c | 109
-rw-r--r-- drivers/gpio/gpio-pisosr.c | 12
-rw-r--r-- drivers/gpio/gpio-uniphier.c | 13
-rw-r--r-- drivers/gpio/gpio-ws16c48.c | 73
-rw-r--r-- drivers/gpu/drm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 102
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 178
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 74
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_internal.h | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 29
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 28
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 6
-rw-r--r-- drivers/gpu/drm/drm_property.c | 2
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.debug | 1
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.profile | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 29
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 67
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 83
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 50
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.c | 13
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline_types.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.h | 45
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 36
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 18
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_main.c | 3
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 28
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 25
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 79
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_power.c | 7
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 15
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 40
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 15
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 43
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 21
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 20
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 39
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 15
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 60
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 114
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 23
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 28
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 3
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 8
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 60
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gpummu.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_mmu.h | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 16
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 94
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 97
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 18
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 7
-rw-r--r-- drivers/gpu/drm/tegra/gem.c | 50
-rw-r--r-- drivers/gpu/drm/tegra/hub.c | 3
-rw-r--r-- drivers/gpu/drm/tegra/plane.c | 11
-rw-r--r-- drivers/gpu/drm/tegra/sor.c | 38
-rw-r--r-- drivers/gpu/drm/tegra/vic.c | 7
-rw-r--r-- drivers/idle/intel_idle.c | 6
-rw-r--r-- drivers/iio/accel/cros_ec_accel_legacy.c | 6
-rw-r--r-- drivers/iio/common/cros_ec_sensors/Kconfig | 2
-rw-r--r-- drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c | 6
-rw-r--r-- drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 4
-rw-r--r-- drivers/iio/light/cros_ec_light_prox.c | 6
-rw-r--r-- drivers/input/keyboard/Kconfig | 2
-rw-r--r-- drivers/input/keyboard/cros_ec_keyb.c | 6
-rw-r--r-- drivers/input/keyboard/snvs_pwrkey.c | 48
-rw-r--r-- drivers/input/misc/uinput.c | 2
-rw-r--r-- drivers/input/rmi4/rmi_f34v7.c | 3
-rw-r--r-- drivers/input/rmi4/rmi_smbus.c | 2
-rw-r--r-- drivers/input/touchscreen/goodix.c | 9
-rw-r--r-- drivers/iommu/Kconfig | 6
-rw-r--r-- drivers/iommu/Makefile | 3
-rw-r--r-- drivers/iommu/amd_iommu.c | 893
-rw-r--r-- drivers/iommu/amd_iommu_types.h | 3
-rw-r--r-- drivers/iommu/arm-smmu-impl.c | 5
-rw-r--r-- drivers/iommu/arm-smmu-qcom.c | 51
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 12
-rw-r--r-- drivers/iommu/arm-smmu.c | 223
-rw-r--r-- drivers/iommu/arm-smmu.h | 16
-rw-r--r-- drivers/iommu/dma-iommu.c | 43
-rw-r--r-- drivers/iommu/dmar.c | 5
-rw-r--r-- drivers/iommu/exynos-iommu.c | 2
-rw-r--r-- drivers/iommu/intel-iommu.c | 61
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 15
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 130
-rw-r--r-- drivers/iommu/ioasid.c | 422
-rw-r--r-- drivers/iommu/iommu.c | 73
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 223
-rw-r--r-- drivers/iommu/msm_iommu.c | 2
-rw-r--r-- drivers/iommu/mtk_iommu.c | 90
-rw-r--r-- drivers/iommu/mtk_iommu.h | 2
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 2
-rw-r--r-- drivers/iommu/of_iommu.c | 2
-rw-r--r-- drivers/iommu/omap-iommu.c | 2
-rw-r--r-- drivers/iommu/qcom_iommu.c | 10
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 11
-rw-r--r-- drivers/iommu/s390-iommu.c | 2
-rw-r--r-- drivers/iommu/tegra-gart.c | 2
-rw-r--r-- drivers/iommu/tegra-smmu.c | 38
-rw-r--r-- drivers/iommu/virtio-iommu.c | 5
-rw-r--r-- drivers/irqchip/Kconfig | 8
-rw-r--r-- drivers/irqchip/Makefile | 1
-rw-r--r-- drivers/irqchip/irq-bcm7038-l1.c | 119
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-pci-msi.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 302
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 4
-rw-r--r-- drivers/irqchip/irq-ingenic.c | 85
-rw-r--r-- drivers/irqchip/irq-ls-extirq.c | 197
-rw-r--r-- drivers/irqchip/irq-ti-sci-inta.c | 5
-rw-r--r-- drivers/irqchip/irq-zevio.c | 2
-rw-r--r-- drivers/irqchip/qcom-pdc.c | 149
-rw-r--r-- drivers/md/dm-table.c | 12
-rw-r--r-- drivers/md/dm-zoned-target.c | 2
-rw-r--r-- drivers/memory/atmel-ebi.c | 11
-rw-r--r-- drivers/memory/brcmstb_dpfe.c | 164
-rw-r--r-- drivers/memory/emif.c | 5
-rw-r--r-- drivers/memory/jedec_ddr.h | 61
-rw-r--r-- drivers/memory/mtk-smi.c | 4
-rw-r--r-- drivers/memory/of_memory.c | 149
-rw-r--r-- drivers/memory/of_memory.h | 18
-rw-r--r-- drivers/memory/samsung/Kconfig | 13
-rw-r--r-- drivers/memory/samsung/Makefile | 1
-rw-r--r-- drivers/memory/samsung/exynos5422-dmc.c | 1550
-rw-r--r-- drivers/memory/tegra/Kconfig | 10
-rw-r--r-- drivers/memory/tegra/Makefile | 1
-rw-r--r-- drivers/memory/tegra/mc.c | 52
-rw-r--r-- drivers/memory/tegra/mc.h | 74
-rw-r--r-- drivers/memory/tegra/tegra114.c | 10
-rw-r--r-- drivers/memory/tegra/tegra124.c | 30
-rw-r--r-- drivers/memory/tegra/tegra20-emc.c | 134
-rw-r--r-- drivers/memory/tegra/tegra30-emc.c | 1232
-rw-r--r-- drivers/memory/tegra/tegra30.c | 34
-rw-r--r-- drivers/memstick/host/jmb38x_ms.c | 2
-rw-r--r-- drivers/mfd/cros_ec_dev.c | 235
-rw-r--r-- drivers/misc/pci_endpoint_test.c | 8
-rw-r--r-- drivers/misc/sram-exec.c | 2
-rw-r--r-- drivers/mtd/nand/onenand/Makefile | 2
-rw-r--r-- drivers/mtd/nand/onenand/samsung_mtd.c (renamed from drivers/mtd/nand/onenand/samsung.c) | 0
-rw-r--r-- drivers/mtd/ubi/debug.c | 1
-rw-r--r-- drivers/mtd/ubi/fastmap-wl.c | 31
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 14
-rw-r--r-- drivers/mtd/ubi/ubi.h | 8
-rw-r--r-- drivers/mtd/ubi/wl.c | 32
-rw-r--r-- drivers/mtd/ubi/wl.h | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_if.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c | 2
-rw-r--r-- drivers/net/wan/z85230.h | 2
-rw-r--r-- drivers/ntb/hw/amd/ntb_hw_amd.c | 1
-rw-r--r-- drivers/nvme/host/core.c | 10
-rw-r--r-- drivers/nvmem/meson-efuse.c | 24
-rw-r--r-- drivers/of/address.c | 103
-rw-r--r-- drivers/of/base.c | 32
-rw-r--r-- drivers/of/fdt.c | 4
-rw-r--r-- drivers/of/of_private.h | 14
-rw-r--r-- drivers/of/overlay.c | 37
-rw-r--r-- drivers/of/property.c | 8
-rw-r--r-- drivers/of/unittest-data/testcases.dts | 1
-rw-r--r-- drivers/of/unittest-data/tests-address.dtsi | 48
-rw-r--r-- drivers/of/unittest.c | 96
-rw-r--r-- drivers/pci/Kconfig | 26
-rw-r--r-- drivers/pci/Makefile | 3
-rw-r--r-- drivers/pci/access.c | 2
-rw-r--r-- drivers/pci/ats.c | 207
-rw-r--r-- drivers/pci/controller/Kconfig | 31
-rw-r--r-- drivers/pci/controller/Makefile | 4
-rw-r--r-- drivers/pci/controller/cadence/Kconfig | 45
-rw-r--r-- drivers/pci/controller/cadence/Makefile | 5
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c (renamed from drivers/pci/controller/pcie-cadence-ep.c) | 96
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c (renamed from drivers/pci/controller/pcie-cadence-host.c) | 97
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-plat.c | 174
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c (renamed from drivers/pci/controller/pcie-cadence.c) | 0
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h (renamed from drivers/pci/controller/pcie-cadence.h) | 79
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 6
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape-ep.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pci-meson.c | 136
-rw-r--r-- drivers/pci/controller/dwc/pcie-artpec6.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 41
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-plat.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 6
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier.c | 10
-rw-r--r-- drivers/pci/controller/pci-aardvark.c | 133
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 79
-rw-r--r-- drivers/pci/controller/pci-host-common.c | 2
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 218
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 4
-rw-r--r-- drivers/pci/controller/pci-thunder-pem.c | 1
-rw-r--r-- drivers/pci/controller/pci-v3-semi.c | 74
-rw-r--r-- drivers/pci/controller/pci-versatile.c | 71
-rw-r--r-- drivers/pci/controller/pci-xgene.c | 73
-rw-r--r-- drivers/pci/controller/pcie-altera.c | 41
-rw-r--r-- drivers/pci/controller/pcie-iproc-msi.c | 5
-rw-r--r-- drivers/pci/controller/pcie-iproc-platform.c | 9
-rw-r--r-- drivers/pci/controller/pcie-iproc.c | 106
-rw-r--r-- drivers/pci/controller/pcie-mediatek.c | 43
-rw-r--r-- drivers/pci/controller/pcie-mobiveil.c | 146
-rw-r--r-- drivers/pci/controller/pcie-rcar.c | 92
-rw-r--r-- drivers/pci/controller/pcie-rockchip-host.c | 158
-rw-r--r-- drivers/pci/controller/pcie-rockchip.h | 7
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 21
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 18
-rw-r--r-- drivers/pci/controller/vmd.c | 34
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 10
-rw-r--r-- drivers/pci/endpoint/pci-epc-mem.c | 2
-rw-r--r-- drivers/pci/hotplug/Kconfig | 2
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 12
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 8
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 36
-rw-r--r-- drivers/pci/hotplug/pciehp_ctrl.c | 10
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 67
-rw-r--r-- drivers/pci/hotplug/rpaphp_core.c | 4
-rw-r--r-- drivers/pci/iov.c | 9
-rw-r--r-- drivers/pci/msi.c | 25
-rw-r--r-- drivers/pci/of.c | 67
-rw-r--r-- drivers/pci/pci-bridge-emul.c | 25
-rw-r--r-- drivers/pci/pci-bridge-emul.h | 78
-rw-r--r-- drivers/pci/pci-driver.c | 198
-rw-r--r-- drivers/pci/pci-sysfs.c | 28
-rw-r--r-- drivers/pci/pci.c | 372
-rw-r--r-- drivers/pci/pci.h | 48
-rw-r--r-- drivers/pci/pcie/Kconfig | 10
-rw-r--r-- drivers/pci/pcie/aer.c | 88
-rw-r--r-- drivers/pci/pcie/aspm.c | 245
-rw-r--r-- drivers/pci/pcie/dpc.c | 2
-rw-r--r-- drivers/pci/pcie/portdrv.h | 2
-rw-r--r-- drivers/pci/pcie/portdrv_core.c | 7
-rw-r--r-- drivers/pci/pcie/portdrv_pci.c | 8
-rw-r--r-- drivers/pci/pcie/ptm.c | 2
-rw-r--r-- drivers/pci/probe.c | 60
-rw-r--r-- drivers/pci/proc.c | 4
-rw-r--r-- drivers/pci/quirks.c | 157
-rw-r--r-- drivers/pci/setup-bus.c | 70
-rw-r--r-- drivers/pci/switch/switchtec.c | 2
-rw-r--r-- drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c | 70
-rw-r--r-- drivers/phy/marvell/Kconfig | 11
-rw-r--r-- drivers/phy/marvell/Makefile | 1
-rw-r--r-- drivers/phy/marvell/phy-mmp3-usb.c | 291
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 112
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.h | 14
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdm845.c | 23
-rw-r--r-- drivers/platform/chrome/Kconfig | 19
-rw-r--r-- drivers/platform/chrome/Makefile | 1
-rw-r--r-- drivers/platform/chrome/cros_ec.c | 84
-rw-r--r-- drivers/platform/chrome/cros_ec_ishtp.c | 25
-rw-r--r-- drivers/platform/chrome/cros_ec_lpc.c | 17
-rw-r--r-- drivers/platform/chrome/cros_ec_proto.c | 267
-rw-r--r-- drivers/platform/chrome/cros_ec_rpmsg.c | 19
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub.c | 199
-rw-r--r-- drivers/platform/chrome/cros_usbpd_logger.c | 1
-rw-r--r-- drivers/platform/chrome/wilco_ec/Kconfig | 2
-rw-r--r-- drivers/platform/chrome/wilco_ec/Makefile | 3
-rw-r--r-- drivers/platform/chrome/wilco_ec/core.c | 28
-rw-r--r-- drivers/platform/chrome/wilco_ec/debugfs.c | 47
-rw-r--r-- drivers/platform/chrome/wilco_ec/keyboard_leds.c | 191
-rw-r--r-- drivers/platform/chrome/wilco_ec/sysfs.c | 91
-rw-r--r-- drivers/platform/chrome/wilco_ec/telemetry.c | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 2
-rw-r--r-- drivers/platform/x86/dell_rbu.c | 2
-rw-r--r-- drivers/power/avs/Kconfig | 12
-rw-r--r-- drivers/pwm/pwm-stm32.c | 112
-rw-r--r-- drivers/pwm/pwm-sun4i.c | 5
-rw-r--r-- drivers/rapidio/devices/tsi721.c | 2
-rw-r--r-- drivers/rapidio/rio-access.c | 2
-rw-r--r-- drivers/rapidio/rio-driver.c | 1
-rw-r--r-- drivers/reset/Kconfig | 5
-rw-r--r-- drivers/reset/core.c | 8
-rw-r--r-- drivers/reset/hisilicon/reset-hi3660.c | 2
-rw-r--r-- drivers/reset/reset-meson-audio-arb.c | 43
-rw-r--r-- drivers/reset/reset-meson.c | 35
-rw-r--r-- drivers/reset/reset-uniphier-glue.c | 4
-rw-r--r-- drivers/reset/reset-zynqmp.c | 2
-rw-r--r-- drivers/rtc/Kconfig | 31
-rw-r--r-- drivers/rtc/interface.c | 58
-rw-r--r-- drivers/rtc/rtc-ab-b5ze-s3.c | 11
-rw-r--r-- drivers/rtc/rtc-armada38x.c | 10
-rw-r--r-- drivers/rtc/rtc-asm9260.c | 4
-rw-r--r-- drivers/rtc/rtc-aspeed.c | 4
-rw-r--r-- drivers/rtc/rtc-at91rm9200.c | 19
-rw-r--r-- drivers/rtc/rtc-at91sam9.c | 4
-rw-r--r-- drivers/rtc/rtc-bd70528.c | 1
-rw-r--r-- drivers/rtc/rtc-brcmstb-waketimer.c | 5
-rw-r--r-- drivers/rtc/rtc-cadence.c | 4
-rw-r--r-- drivers/rtc/rtc-coh901331.c | 4
-rw-r--r-- drivers/rtc/rtc-cros-ec.c | 22
-rw-r--r-- drivers/rtc/rtc-da9063.c | 3
-rw-r--r-- drivers/rtc/rtc-davinci.c | 4
-rw-r--r-- drivers/rtc/rtc-digicolor.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1216.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1286.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1302.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1343.c | 297
-rw-r--r-- drivers/rtc/rtc-ds1347.c | 102
-rw-r--r-- drivers/rtc/rtc-ds1374.c | 3
-rw-r--r-- drivers/rtc/rtc-ds1511.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1553.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1685.c | 116
-rw-r--r-- drivers/rtc/rtc-em3027.c | 4
-rw-r--r-- drivers/rtc/rtc-ep93xx.c | 4
-rw-r--r-- drivers/rtc/rtc-fsl-ftm-alarm.c | 24
-rw-r--r-- drivers/rtc/rtc-goldfish.c | 8
-rw-r--r-- drivers/rtc/rtc-jz4740.c | 4
-rw-r--r-- drivers/rtc/rtc-lpc24xx.c | 4
-rw-r--r-- drivers/rtc/rtc-lpc32xx.c | 15
-rw-r--r-- drivers/rtc/rtc-m41t80.c | 6
-rw-r--r-- drivers/rtc/rtc-m48t86.c | 11
-rw-r--r-- drivers/rtc/rtc-mc146818-lib.c | 15
-rw-r--r-- drivers/rtc/rtc-meson.c | 6
-rw-r--r-- drivers/rtc/rtc-msm6242.c | 23
-rw-r--r-- drivers/rtc/rtc-mt7622.c | 4
-rw-r--r-- drivers/rtc/rtc-mv.c | 4
-rw-r--r-- drivers/rtc/rtc-omap.c | 4
-rw-r--r-- drivers/rtc/rtc-pcf2127.c | 10
-rw-r--r-- drivers/rtc/rtc-pcf8523.c | 18
-rw-r--r-- drivers/rtc/rtc-pcf8563.c | 2
-rw-r--r-- drivers/rtc/rtc-pic32.c | 4
-rw-r--r-- drivers/rtc/rtc-pm8xxx.c | 2
-rw-r--r-- drivers/rtc/rtc-r7301.c | 7
-rw-r--r-- drivers/rtc/rtc-rtd119x.c | 4
-rw-r--r-- drivers/rtc/rtc-rv3028.c | 146
-rw-r--r-- drivers/rtc/rtc-rx6110.c | 16
-rw-r--r-- drivers/rtc/rtc-s35390a.c | 16
-rw-r--r-- drivers/rtc/rtc-s3c.c | 4
-rw-r--r-- drivers/rtc/rtc-sa1100.c | 4
-rw-r--r-- drivers/rtc/rtc-sc27xx.c | 7
-rw-r--r-- drivers/rtc/rtc-sirfsoc.c | 8
-rw-r--r-- drivers/rtc/rtc-spear.c | 4
-rw-r--r-- drivers/rtc/rtc-st-lpc.c | 5
-rw-r--r-- drivers/rtc/rtc-stk17ta8.c | 4
-rw-r--r-- drivers/rtc/rtc-stm32.c | 4
-rw-r--r-- drivers/rtc/rtc-sun6i.c | 2
-rw-r--r-- drivers/rtc/rtc-sunxi.c | 4
-rw-r--r-- drivers/rtc/rtc-tegra.c | 8
-rw-r--r-- drivers/rtc/rtc-tps65910.c | 21
-rw-r--r-- drivers/rtc/rtc-tx4939.c | 4
-rw-r--r-- drivers/rtc/rtc-v3020.c | 3
-rw-r--r-- drivers/rtc/rtc-vt8500.c | 32
-rw-r--r-- drivers/rtc/rtc-wilco-ec.c | 8
-rw-r--r-- drivers/rtc/rtc-xgene.c | 6
-rw-r--r-- drivers/rtc/rtc-zynqmp.c | 7
-rw-r--r-- drivers/rtc/sysfs.c | 5
-rw-r--r-- drivers/s390/crypto/zcrypt_error.h | 2
-rw-r--r-- drivers/s390/scsi/Makefile | 2
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 12
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 8
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_diag.c | 305
-rw-r--r-- drivers/s390/scsi/zfcp_diag.h | 101
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 73
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 21
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 170
-rw-r--r-- drivers/scsi/NCR5380.c | 37
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 11
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 23
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 5
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 21
-rw-r--r-- drivers/scsi/aacraid/linit.c | 35
-rw-r--r-- drivers/scsi/aacraid/src.c | 10
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 6
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 4
-rw-r--r-- drivers/scsi/atari_scsi.c | 6
-rw-r--r-- drivers/scsi/atp870u.c | 2
-rw-r--r-- drivers/scsi/bfa/bfad.c | 3
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 4
-rw-r--r-- drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r-- drivers/scsi/csiostor/csio_hw.c | 20
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 7
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.c | 18
-rw-r--r-- drivers/scsi/csiostor/csio_mb.c | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 28
-rw-r--r-- drivers/scsi/cxlflash/main.c | 2
-rw-r--r-- drivers/scsi/esas2r/esas2r_flash.c | 1
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 3
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 67
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 376
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 6
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 13
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 30
-rw-r--r-- drivers/scsi/hosts.c | 19
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 3
-rw-r--r-- drivers/scsi/ips.c | 2
-rw-r--r-- drivers/scsi/isci/port_config.c | 2
-rw-r--r-- drivers/scsi/isci/remote_device.c | 2
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 40
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 298
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 28
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 118
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 57
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 200
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 31
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 956
-rw-r--r-- drivers/scsi/lpfc/lpfc_logmsg.h | 17
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 149
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 85
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 103
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 43
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 391
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 42
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mac_scsi.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 3
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 10
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 7
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 36
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 15
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 344
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.h | 9
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 12
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 2
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 2
-rw-r--r-- drivers/scsi/nsp32.c | 2
-rw-r--r-- drivers/scsi/pcmcia/Kconfig | 2
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 20
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 133
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 38
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 70
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 24
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 450
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.h | 3
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.h | 2
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 8
-rw-r--r-- drivers/scsi/qedi/qedi_dbg.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 34
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 78
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 146
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 12
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 106
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 42
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 15
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 11
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 174
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.c | 29
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 3
-rw-r--r-- drivers/scsi/scsi.c | 6
-rw-r--r-- drivers/scsi/scsi_debug.c | 9
-rw-r--r-- drivers/scsi/scsi_lib.c | 45
-rw-r--r-- drivers/scsi/scsi_logging.c | 10
-rw-r--r-- drivers/scsi/scsi_priv.h | 2
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 22
-rw-r--r-- drivers/scsi/scsi_trace.c | 124
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 9
-rw-r--r-- drivers/scsi/sd.c | 13
-rw-r--r-- drivers/scsi/sd.h | 3
-rw-r--r-- drivers/scsi/sd_zbc.c | 10
-rw-r--r-- drivers/scsi/sg.c | 91
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 77
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 437
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sas_transport.c | 22
-rw-r--r-- drivers/scsi/sun3_scsi.c | 4
-rw-r--r-- drivers/scsi/ufs/Kconfig | 10
-rw-r--r-- drivers/scsi/ufs/Makefile | 1
-rw-r--r-- drivers/scsi/ufs/ti-j721e-ufs.c | 90
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 5
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 3
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 53
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.h | 3
-rw-r--r-- drivers/scsi/ufs/ufs-sysfs.c | 15
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.c | 1
-rw-r--r-- drivers/scsi/ufs/ufshcd-dwc.c | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 1
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 214
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 2
-rw-r--r-- drivers/scsi/zorro_esp.c | 11
-rw-r--r-- drivers/soc/amlogic/meson-gx-socinfo.c | 3
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-snoop.c | 4
-rw-r--r-- drivers/soc/atmel/Kconfig | 11
-rw-r--r-- drivers/soc/atmel/Makefile | 1
-rw-r--r-- drivers/soc/atmel/sfr.c | 99
-rw-r--r-- drivers/soc/fsl/Kconfig | 10
-rw-r--r-- drivers/soc/fsl/Makefile | 1
-rw-r--r-- drivers/soc/fsl/rcpm.c | 151
-rw-r--r-- drivers/soc/imx/soc-imx-scu.c | 34
-rw-r--r-- drivers/soc/imx/soc-imx8.c | 49
-rw-r--r-- drivers/soc/mediatek/mtk-cmdq-helper.c | 2
-rw-r--r-- drivers/soc/mediatek/mtk-scpsys.c | 214
-rw-r--r-- drivers/soc/qcom/Kconfig | 24
-rw-r--r-- drivers/soc/qcom/Makefile | 4
-rw-r--r-- drivers/soc/qcom/llcc-qcom.c (renamed from drivers/soc/qcom/llcc-slice.c) | 130
-rw-r--r-- drivers/soc/qcom/llcc-sdm845.c | 100
-rw-r--r-- drivers/soc/qcom/ocmem.c | 433
-rw-r--r-- drivers/soc/qcom/qcom_aoss.c | 8
-rw-r--r-- drivers/soc/qcom/rpmpd.c | 23
-rw-r--r-- drivers/soc/qcom/smd-rpm.c | 18
-rw-r--r-- drivers/soc/qcom/socinfo.c | 2
-rw-r--r-- drivers/soc/renesas/Kconfig | 32
-rw-r--r-- drivers/soc/renesas/Makefile | 4
-rw-r--r-- drivers/soc/renesas/r8a7743-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a7745-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a77470-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a774a1-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a774b1-sysc.c | 37
-rw-r--r-- drivers/soc/renesas/r8a774c0-sysc.c | 4
-rw-r--r-- drivers/soc/renesas/r8a7779-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a7790-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a7791-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a7792-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a7794-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r8a7795-sysc.c | 33
-rw-r--r-- drivers/soc/renesas/r8a7796-sysc.c | 30
-rw-r--r-- drivers/soc/renesas/r8a77965-sysc.c | 4
-rw-r--r-- drivers/soc/renesas/r8a77970-sysc.c | 4
-rw-r--r-- drivers/soc/renesas/r8a77980-sysc.c | 4
-rw-r--r-- drivers/soc/renesas/r8a77990-sysc.c | 4
-rw-r--r-- drivers/soc/renesas/r8a77995-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/rcar-rst.c | 2
-rw-r--r-- drivers/soc/renesas/rcar-sysc.c | 26
-rw-r--r-- drivers/soc/renesas/rcar-sysc.h | 9
-rw-r--r-- drivers/soc/renesas/renesas-soc.c | 15
-rw-r--r-- drivers/soc/samsung/Kconfig | 10
-rw-r--r-- drivers/soc/samsung/Makefile | 3
-rw-r--r-- drivers/soc/samsung/exynos-asv.c | 177
-rw-r--r-- drivers/soc/samsung/exynos-asv.h | 71
-rw-r--r-- drivers/soc/samsung/exynos-chipid.c | 12
-rw-r--r-- drivers/soc/samsung/exynos5422-asv.c | 505
-rw-r--r-- drivers/soc/samsung/exynos5422-asv.h | 31
-rw-r--r-- drivers/soc/tegra/Kconfig | 10
-rw-r--r-- drivers/soc/tegra/Makefile | 2
-rw-r--r-- drivers/soc/tegra/flowctrl.c | 19
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra.c | 198
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra30.c | 154
-rw-r--r-- drivers/soc/tegra/fuse/fuse.h | 8
-rw-r--r-- drivers/soc/tegra/pmc.c | 279
-rw-r--r-- drivers/soc/tegra/regulators-tegra20.c | 365
-rw-r--r-- drivers/soc/tegra/regulators-tegra30.c | 317
-rw-r--r-- drivers/soc/ti/Makefile | 1
-rw-r--r-- drivers/soc/ti/omap_prm.c | 391
-rw-r--r-- drivers/soc/xilinx/zynqmp_pm_domains.c | 10
-rw-r--r-- drivers/staging/gasket/gasket_constants.h | 3
-rw-r--r-- drivers/staging/gasket/gasket_core.c | 12
-rw-r--r-- drivers/staging/gasket/gasket_core.h | 4
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 24
-rw-r--r-- drivers/target/iscsi/iscsi_target_auth.c | 232
-rw-r--r-- drivers/target/iscsi/iscsi_target_auth.h | 17
-rw-r--r-- drivers/target/iscsi/iscsi_target_parameters.h | 3
-rw-r--r-- drivers/target/target_core_fabric_lib.c | 2
-rw-r--r-- drivers/target/target_core_tpg.c | 12
-rw-r--r-- drivers/target/target_core_transport.c | 28
-rw-r--r-- drivers/target/target_core_user.c | 6
-rw-r--r-- drivers/target/target_core_xcopy.c | 1
-rw-r--r-- drivers/tee/optee/call.c | 7
-rw-r--r-- drivers/tee/optee/core.c | 20
-rw-r--r-- drivers/tee/optee/shm_pool.c | 12
-rw-r--r-- drivers/thermal/Kconfig | 12
-rw-r--r-- drivers/thermal/Makefile | 1
-rw-r--r-- drivers/thermal/amlogic_thermal.c | 333
-rw-r--r-- drivers/thermal/cpu_cooling.c | 404
-rw-r--r-- drivers/thermal/intel/intel_soc_dts_iosf.c | 31
-rw-r--r-- drivers/thermal/intel/intel_soc_dts_iosf.h | 2
-rw-r--r-- drivers/thermal/qcom/tsens-8960.c | 4
-rw-r--r-- drivers/thermal/qcom/tsens-common.c | 529
-rw-r--r-- drivers/thermal/qcom/tsens-v0_1.c | 11
-rw-r--r-- drivers/thermal/qcom/tsens-v1.c | 198
-rw-r--r-- drivers/thermal/qcom/tsens-v2.c | 13
-rw-r--r-- drivers/thermal/qcom/tsens.c | 62
-rw-r--r-- drivers/thermal/qcom/tsens.h | 288
-rw-r--r-- drivers/thermal/qoriq_thermal.c | 120
-rw-r--r-- drivers/thermal/rcar_gen3_thermal.c | 4
-rw-r--r-- drivers/thermal/thermal-generic-adc.c | 9
-rw-r--r-- drivers/thermal/thermal_core.c | 107
-rw-r--r-- drivers/thermal/thermal_mmio.c | 1
-rw-r--r-- drivers/tty/Kconfig | 40
-rw-r--r-- drivers/tty/amiserial.c | 84
-rw-r--r-- drivers/tty/hvc/Kconfig | 28
-rw-r--r-- drivers/tty/hvc/hvc_dcc.c | 28
-rw-r--r-- drivers/tty/rocket.c | 32
-rw-r--r-- drivers/tty/serdev/core.c | 111
-rw-r--r-- drivers/tty/serial/8250/8250_aspeed_vuart.c | 84
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 83
-rw-r--r-- drivers/tty/serial/8250/8250_exar.c | 19
-rw-r--r-- drivers/tty/serial/8250/8250_lpss.c | 21
-rw-r--r-- drivers/tty/serial/8250/8250_mtk.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_of.c | 31
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 300
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 14
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 3
-rw-r--r-- drivers/tty/serial/Kconfig | 104
-rw-r--r-- drivers/tty/serial/Makefile | 2
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 12
-rw-r--r-- drivers/tty/serial/fsl_linflexuart.c | 4
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 84
-rw-r--r-- drivers/tty/serial/ifx6x60.c | 3
-rw-r--r-- drivers/tty/serial/imx.c | 7
-rw-r--r-- drivers/tty/serial/msm_serial.c | 10
-rw-r--r-- drivers/tty/serial/pch_uart.c | 5
-rw-r--r-- drivers/tty/serial/qcom_geni_serial.c | 68
-rw-r--r-- drivers/tty/serial/samsung_tty.c (renamed from drivers/tty/serial/samsung.c) | 0
-rw-r--r-- drivers/tty/serial/serial-tegra.c | 3
-rw-r--r-- drivers/tty/serial/serial_core.c | 2
-rw-r--r-- drivers/tty/serial/sirfsoc_uart.h | 5
-rw-r--r-- drivers/tty/serial/sprd_serial.c | 33
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 6
-rw-r--r-- drivers/tty/serial/uartlite.c | 97
-rw-r--r-- drivers/tty/tty_io.c | 14
-rw-r--r-- drivers/tty/tty_ldisc.c | 7
-rw-r--r-- drivers/tty/vt/keyboard.c | 2
-rw-r--r-- drivers/tty/vt/vc_screen.c | 3
-rw-r--r-- drivers/usb/core/hcd-pci.c | 2
-rw-r--r-- drivers/usb/core/hub.c | 5
-rw-r--r-- drivers/usb/host/pci-quirks.c | 2
-rw-r--r-- drivers/usb/storage/ene_ub6250.c | 2
-rw-r--r-- drivers/usb/storage/transport.c | 3
-rw-r--r-- drivers/usb/storage/uas.c | 1
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 11
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 32
-rw-r--r-- drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r-- drivers/vfio/pci/vfio_pci_private.h | 4
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 26
-rw-r--r-- drivers/vhost/vhost.c | 6
-rw-r--r-- drivers/vhost/vhost.h | 1
-rw-r--r-- drivers/video/fbdev/aty/radeon_pm.c | 2
-rw-r--r-- drivers/video/fbdev/core/fbmem.c | 4
-rw-r--r-- drivers/video/fbdev/efifb.c | 2
-rw-r--r-- drivers/video/logo/.gitignore | 1
-rw-r--r-- drivers/video/logo/Makefile | 15
-rw-r--r-- drivers/video/logo/pnmtologo.c | 514
-rw-r--r-- drivers/xen/events/events_base.c | 16
-rw-r--r-- drivers/xen/gntdev-common.h | 2
-rw-r--r-- drivers/xen/gntdev-dmabuf.c | 11
-rw-r--r-- drivers/xen/gntdev.c | 64
-rw-r--r-- drivers/xen/platform-pci.c | 14
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.c | 13
824 files changed, 29306 insertions, 10508 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4fb97511a16f..002838d23b86 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,9 +104,9 @@ config ACPI_PROCFS_POWER
depends on X86 && PROC_FS
help
For backwards compatibility, this option allows
- deprecated power /proc/acpi/ directories to exist, even when
- they have been replaced by functions in /sys.
- The deprecated directories (and their replacements) include:
+ deprecated power /proc/acpi/ directories to exist, even when
+ they have been replaced by functions in /sys.
+ The deprecated directories (and their replacements) include:
/proc/acpi/battery/* (/sys/class/power_supply/*) and
/proc/acpi/ac_adapter/* (/sys/class/power_supply/*).
This option has no effect on /proc/acpi/ directories
@@ -448,7 +448,7 @@ config ACPI_CUSTOM_METHOD
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
depends on EFI && (X86 || ARM64)
- help
+ help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
data from the firmware boot splash. It will appear under
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 48bc96d45bab..54002670cb7a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -153,7 +153,7 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
{
acpi_status status;
- if (!*data)
+ if (!data)
return -EINVAL;
status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d27b01c0323d..b758b45737f5 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -79,6 +79,19 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids);
static const struct dmi_system_id dmi_lid_quirks[] = {
{
/*
+ * Acer Switch 10 SW5-012. _LID method messes with home and
+ * power button GPIO IRQ settings causing an interrupt storm on
+ * both GPIOs. This is unfixable without a DSDT override, so we
+ * have to disable the lid-switch functionality altogether :|
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
+ },
+ {
+ /*
* Asus T200TA, _LID keeps reporting closed after every second
* opening of the lid, causing an immediate re-suspend after
* every other open. Using LID_INIT_OPEN fixes this.
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 4fd84fbdac29..d05be13c1022 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -533,26 +533,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
}
#ifdef CONFIG_PM_SLEEP
-static bool acpi_ec_query_flushed(struct acpi_ec *ec)
+static void __acpi_ec_flush_work(void)
{
- bool flushed;
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
- flushed = !ec->nr_pending_queries;
- spin_unlock_irqrestore(&ec->lock, flags);
- return flushed;
-}
-
-static void __acpi_ec_flush_event(struct acpi_ec *ec)
-{
- /*
- * When ec_freeze_events is true, we need to flush events in
- * the proper position before entering the noirq stage.
- */
- wait_event(ec->wait, acpi_ec_query_flushed(ec));
- if (ec_query_wq)
- flush_workqueue(ec_query_wq);
+ flush_scheduled_work(); /* flush ec->work */
+ flush_workqueue(ec_query_wq); /* flush queries */
}
static void acpi_ec_disable_event(struct acpi_ec *ec)
@@ -562,15 +546,21 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
spin_unlock_irqrestore(&ec->lock, flags);
- __acpi_ec_flush_event(ec);
+
+ /*
+ * When ec_freeze_events is true, we need to flush events in
+ * the proper position before entering the noirq stage.
+ */
+ __acpi_ec_flush_work();
}
void acpi_ec_flush_work(void)
{
- if (first_ec)
- __acpi_ec_flush_event(first_ec);
+ /* Without ec_query_wq there is nothing to flush. */
+ if (!ec_query_wq)
+ return;
- flush_scheduled_work();
+ __acpi_ec_flush_work();
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a2e844a8e9ed..41168c027a5a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -374,19 +374,21 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
-static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
+/* Must be called with mutex_lock(&acpi_ioremap_lock) */
+static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
- if (!--map->refcount)
+ unsigned long refcount = --map->refcount;
+
+ if (!refcount)
list_del_rcu(&map->list);
+ return refcount;
}
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
- if (!map->refcount) {
- synchronize_rcu_expedited();
- acpi_unmap(map->phys, map->virt);
- kfree(map);
- }
+ synchronize_rcu_expedited();
+ acpi_unmap(map->phys, map->virt);
+ kfree(map);
}
/**
@@ -406,6 +408,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
+ unsigned long refcount;
if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size);
@@ -419,10 +422,11 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return;
}
- acpi_os_drop_map_ref(map);
+ refcount = acpi_os_drop_map_ref(map);
mutex_unlock(&acpi_ioremap_lock);
- acpi_os_map_cleanup(map);
+ if (!refcount)
+ acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
@@ -457,6 +461,7 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
struct acpi_ioremap *map;
+ unsigned long refcount;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
@@ -472,10 +477,11 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
mutex_unlock(&acpi_ioremap_lock);
return;
}
- acpi_os_drop_map_ref(map);
+ refcount = acpi_os_drop_map_ref(map);
mutex_unlock(&acpi_ioremap_lock);
- acpi_os_map_cleanup(map);
+ if (!refcount)
+ acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
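The osl.c change above applies a common locking pattern: the reference count is dropped while the mutex is held, the final count is handed back to the caller, and the potentially sleeping cleanup (synchronize_rcu_expedited() plus kfree()) runs only outside the lock, and only when the count actually reached zero. A minimal sketch of the pattern under that assumption; obj, obj_lock and obj_destroy() are hypothetical names, not kernel APIs:

	static void obj_put(struct obj *o)
	{
		unsigned long refcount;

		mutex_lock(&obj_lock);
		refcount = --o->refcount;	/* drop the reference under the lock */
		if (!refcount)
			list_del_rcu(&o->list);	/* unpublish the object */
		mutex_unlock(&obj_lock);

		if (!refcount) {
			synchronize_rcu();	/* may sleep, so it must run unlocked */
			obj_destroy(o);		/* hypothetical destructor */
		}
	}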
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2af937a8b1c5..6747a279621b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
return 0;
}
+static void acpi_s2idle_sync(void)
+{
+ /*
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
+ */
+ acpi_ec_flush_work();
+ acpi_os_wait_events_complete(); /* synchronize Notify handling */
+}
+
static void acpi_s2idle_wake(void)
{
/*
@@ -1001,13 +1011,8 @@ static void acpi_s2idle_wake(void)
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
- /*
- * The EC driver uses the system workqueue and an additional
- * special one, so those need to be flushed too.
- */
- acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
- acpi_ec_flush_work();
- acpi_os_wait_events_complete(); /* synchronize Notify handling */
+
+ acpi_s2idle_sync();
rearm_wake_irq(acpi_sci_irq);
}
@@ -1024,6 +1029,13 @@ static void acpi_s2idle_restore_early(void)
static void acpi_s2idle_restore(void)
{
+ /*
+ * Drain pending events before restoring the working-state configuration
+ * of GPEs.
+ */
+ acpi_os_wait_events_complete(); /* synchronize GPE processing */
+ acpi_s2idle_sync();
+
s2idle_wakeup = false;
acpi_enable_all_runtime_gpes();
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 75948a3f1a20..c60d2c6d31d6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -819,14 +819,14 @@ end:
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
-#define ACPI_MASKABLE_GPE_MAX 0xFF
+#define ACPI_MASKABLE_GPE_MAX 0x100
static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
u8 gpe;
- if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
+ if (kstrtou8(val, 0, &gpe))
return -EINVAL;
set_bit(gpe, acpi_masked_gpes_map);
@@ -838,7 +838,7 @@ void __init acpi_gpe_apply_masked_gpes(void)
{
acpi_handle handle;
acpi_status status;
- u8 gpe;
+ u16 gpe;
for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
status = acpi_get_gpe_device(gpe, &handle);
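The sysfs.c hunk fixes an off-by-one: DECLARE_BITMAP(map, n) provides bits 0..n-1, so a 0xFF-bit map could never represent GPE 0xFF, while 0x100 bits cover the full u8 range. The explicit "gpe > ACPI_MASKABLE_GPE_MAX" test becomes redundant because kstrtou8() already rejects anything above 0xFF. The iterator is widened to u16 because for_each_set_bit() terminates by comparing the returned position against the bound 0x100, and 0x100 truncated to a u8 is 0, so a u8 iterator could never fail the "< 0x100" test and end the loop.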
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index ebecab8c3f36..135173c8d138 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -219,7 +219,6 @@ struct arasan_cf_dev {
static struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
- .sg_tablesize = SG_NONE,
.dma_boundary = 0xFFFFFFFFUL,
};
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index cfd0cf2cbca6..e01a3a6e4d46 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -422,7 +422,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
#ifdef ATP867X_DEBUG
atp867x_check_res(pdev);
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
(unsigned long long)(host->iomap[i]));
#endif
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 65ec8dff1c51..f3e62f5528bd 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2329,7 +2329,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
- for (bar = 0; bar < 6; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index bef6b85778b6..874c259a8829 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -288,31 +288,6 @@ static int charlcd_init_display(struct charlcd *lcd)
}
/*
- * Parses an unsigned integer from a string, until a non-digit character
- * is found. The empty string is not accepted. No overflow checks are done.
- *
- * Returns whether the parsing was successful. Only in that case
- * the output parameters are written to.
- *
- * TODO: If the kernel adds an inplace version of kstrtoul(), this function
- * could be easily replaced by that.
- */
-static bool parse_n(const char *s, unsigned long *res, const char **next_s)
-{
- if (!isdigit(*s))
- return false;
-
- *res = 0;
- while (isdigit(*s)) {
- *res = *res * 10 + (*s - '0');
- ++s;
- }
-
- *next_s = s;
- return true;
-}
-
-/*
* Parses a movement command of the form "(.*);", where the group can be
* any number of subcommands of the form "(x|y)[0-9]+".
*
@@ -336,6 +311,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
{
unsigned long new_x = *x;
unsigned long new_y = *y;
+ char *p;
for (;;) {
if (!*s)
@@ -345,11 +321,15 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
break;
if (*s == 'x') {
- if (!parse_n(s + 1, &new_x, &s))
+ new_x = simple_strtoul(s + 1, &p, 10);
+ if (p == s + 1)
return false;
+ s = p;
} else if (*s == 'y') {
- if (!parse_n(s + 1, &new_y, &s))
+ new_y = simple_strtoul(s + 1, &p, 10);
+ if (p == s + 1)
return false;
+ s = p;
} else {
return false;
}
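The replacement relies on the standard endptr idiom of simple_strtoul(): when the returned end pointer equals the start pointer, no digits were consumed and parsing failed. A stand-alone sketch of the idiom as used in the hunk above (the string content is illustrative):

	const char *s = "x42;";	/* a movement subcommand */
	char *p;
	unsigned long new_x;

	new_x = simple_strtoul(s + 1, &p, 10);	/* parse the digits after 'x' */
	if (p == s + 1)
		return false;	/* no digits at all: reject the command */
	s = p;			/* continue scanning after the number */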
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 28b92e3cc570..c3b3b5c0b0da 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -148,6 +148,10 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+config PM_QOS_KUNIT_TEST
+ bool "KUnit Test for PM QoS features"
+ depends on KUNIT
+
config HMEM_REPORTING
bool
default n
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 296546ffed6c..98a31bafc8a2 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -496,20 +496,17 @@ static ssize_t node_read_vmstat(struct device *dev,
int n = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
+ n += sprintf(buf+n, "%s %lu\n", zone_stat_name(i),
sum_zone_node_page_state(nid, i));
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n",
- vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+ n += sprintf(buf+n, "%s %lu\n", numa_stat_name(i),
sum_zone_numa_state(nid, i));
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n",
- vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS],
+ n += sprintf(buf+n, "%s %lu\n", node_stat_name(i),
node_page_state(pgdat, i));
return n;
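The node.c hunk swaps manual vmstat_text[] index arithmetic for named helpers. Presumably (their definitions live outside this diff, in include/linux/vmstat.h) they are thin wrappers encoding the same offsets the old code computed by hand, roughly:

	static inline const char *zone_stat_name(enum zone_stat_item item)
	{
		return vmstat_text[item];
	}

	static inline const char *numa_stat_name(enum numa_stat_item item)
	{
		return vmstat_text[NR_VM_ZONE_STAT_ITEMS + item];
	}

	static inline const char *node_stat_name(enum node_stat_item item)
	{
		return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
				   NR_VM_NUMA_STAT_ITEMS + item];
	}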
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index ec5bb190b9d0..8fdd0073eeeb 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
+obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
new file mode 100644
index 000000000000..3115db08d56b
--- /dev/null
+++ b/drivers/base/power/qos-test.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+#include <kunit/test.h>
+#include <linux/pm_qos.h>
+
+/* Basic test for aggregating two "min" requests */
+static void freq_qos_test_min(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+
+ ret = freq_qos_remove_request(&req2);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+}
+
+/* Test that requests for MAX_DEFAULT_VALUE have no effect */
+static void freq_qos_test_maxdef(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* Add max 1000 */
+ ret = freq_qos_update_request(&req1, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Add max 2000, no impact */
+ ret = freq_qos_update_request(&req2, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Remove max 1000, new max 2000 */
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
+}
+
+/*
+ * Test that a freq_qos_request can be added again after removal
+ *
+ * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
+ * QoS requests after removal")
+ */
+static void freq_qos_test_readd(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req, 0, sizeof(req));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ /* Remove */
+ ret = freq_qos_remove_request(&req);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add again */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+}
+
+static struct kunit_case pm_qos_test_cases[] = {
+ KUNIT_CASE(freq_qos_test_min),
+ KUNIT_CASE(freq_qos_test_maxdef),
+ KUNIT_CASE(freq_qos_test_readd),
+ {},
+};
+
+static struct kunit_suite pm_qos_test_module = {
+ .name = "qos-kunit-test",
+ .test_cases = pm_qos_test_cases,
+};
+kunit_test_suite(pm_qos_test_module);
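Since the new suite only builds when PM_QOS_KUNIT_TEST is enabled (see the drivers/base/Kconfig hunk above), something like the following .kunitconfig should be enough to exercise it; this is a sketch, and the exact kunit.py invocation may vary between trees:

	# .kunitconfig
	CONFIG_KUNIT=y
	CONFIG_PM_QOS_KUNIT_TEST=y

	$ ./tools/testing/kunit/kunit.py run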
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 350dcafd751f..8e93167f1783 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -115,10 +115,20 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
spin_lock_irqsave(&dev->power.lock, flags);
- if (type == DEV_PM_QOS_RESUME_LATENCY) {
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
- } else {
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
+ break;
+ default:
WARN_ON(1);
ret = 0;
}
@@ -159,6 +169,10 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_apply(&req->data.freq, action, value);
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -209,6 +223,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
+ freq_constraints_init(&qos->freq);
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -269,6 +285,20 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
memset(req, 0, sizeof(*req));
}
+ c = &qos->freq.min_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.max_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -314,11 +344,22 @@ static int __dev_pm_qos_add_request(struct device *dev,
ret = dev_pm_qos_constraints_allocate(dev);
trace_dev_pm_qos_add_request(dev_name(dev), type, value);
- if (!ret) {
- req->dev = dev;
- req->type = type;
+ if (ret)
+ return ret;
+
+ req->dev = dev;
+ req->type = type;
+ if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MIN, value);
+ else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MAX, value);
+ else
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
+
return ret;
}
@@ -382,6 +423,10 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ curr_value = req->data.freq.pnode.prio;
+ break;
case DEV_PM_QOS_FLAGS:
curr_value = req->data.flr.flags;
break;
@@ -507,6 +552,14 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
default:
WARN_ON(1);
ret = -EINVAL;
@@ -546,6 +599,14 @@ int dev_pm_qos_remove_notifier(struct device *dev,
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
default:
WARN_ON(1);
ret = -EINVAL;
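/*
 * Illustrative sketch, not part of this patch: registering for changes to
 * the aggregate min-frequency constraint of a device using the notifier
 * support added above. The callback and notifier names are hypothetical.
 */
static int example_min_freq_notify(struct notifier_block *nb,
				   unsigned long freq, void *data)
{
	pr_info("effective min frequency is now %lu kHz\n", freq);
	return NOTIFY_OK;
}

static struct notifier_block example_min_freq_nb = {
	.notifier_call = example_min_freq_notify,
};

/* dev_pm_qos_add_notifier(dev, &example_min_freq_nb, DEV_PM_QOS_MIN_FREQUENCY); */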
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5817b51d2b15..70a9edb5f525 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -248,6 +248,60 @@ void wakeup_source_unregister(struct wakeup_source *ws)
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
/**
+ * wakeup_sources_read_lock - Lock wakeup source list for read.
+ *
+ * Returns an SRCU read-side lock index for the wakeup sources list.
+ * This index must be passed to the matching wakeup_sources_read_unlock().
+ */
+int wakeup_sources_read_lock(void)
+{
+ return srcu_read_lock(&wakeup_srcu);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
+
+/**
+ * wakeup_sources_read_unlock - Unlock wakeup source list.
+ * @idx: return value from corresponding wakeup_sources_read_lock()
+ */
+void wakeup_sources_read_unlock(int idx)
+{
+ srcu_read_unlock(&wakeup_srcu, idx);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
+
+/**
+ * wakeup_sources_walk_start - Begin a walk of the wakeup source list
+ *
+ * Returns the first object in the list of wakeup sources.
+ *
+ * Note that to be safe, the wakeup sources list must be locked by calling
+ * wakeup_sources_read_lock() beforehand.
+ */
+struct wakeup_source *wakeup_sources_walk_start(void)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
+
+/**
+ * wakeup_sources_walk_next - Get next wakeup source from the list
+ * @ws: Previous wakeup source object
+ *
+ * Note that to be safe, the wakeup sources list must be locked by calling
+ * wakeup_sources_read_lock() beforehand.
+ */
+struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_next_or_null_rcu(ws_head, &ws->entry,
+ struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
+
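/*
 * Illustrative sketch, not part of this patch: walking the wakeup source
 * list with the helpers above. The function name and the pr_info() output
 * are hypothetical.
 */
static void example_dump_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int idx;

	idx = wakeup_sources_read_lock();
	for (ws = wakeup_sources_walk_start(); ws;
	     ws = wakeup_sources_walk_next(ws))
		pr_info("wakeup source: %s\n", ws->name);
	wakeup_sources_read_unlock(idx);
}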
+/**
* device_wakeup_attach - Attach a wakeup source object to a device object.
* @dev: Device to handle.
* @ws: Wakeup source object to attach to @dev.
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c548a5a6c1a0..a8730cc4db10 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -297,6 +297,10 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
unsigned int len = bvec.bv_len;
int err;
+ /* Unaligned buffers are not supported */
+ WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
+ (len & (SECTOR_SIZE - 1)));
+
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), sector);
if (err)
@@ -382,7 +386,6 @@ static struct brd_device *brd_alloc(int i)
goto out_free_dev;
blk_queue_make_request(brd->brd_queue, brd_make_request);
- blk_queue_max_hw_sectors(brd->brd_queue, 1024);
/* This is so fdisk will align partitions on 4k, because of
* direct_access API needing 4k alignment, returning a PFN
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 795fda576824..ae8d4bc532b0 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1559,14 +1559,13 @@ static int init_driver_queues(struct nullb *nullb)
static int null_gendisk_register(struct nullb *nullb)
{
+ sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
struct gendisk *disk;
- sector_t size;
disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
if (!disk)
return -ENOMEM;
- size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
- set_capacity(disk, size >> 9);
+ set_capacity(disk, size);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
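/*
 * Worked example, not part of this patch: dev->size is in megabytes, so
 * with SZ_1M == 1 << 20 and SECTOR_SHIFT == 9 the capacity in 512-byte
 * sectors is size * 2048; e.g. a 250 MB device gets 512000 sectors.
 */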
@@ -1576,12 +1575,19 @@ static int null_gendisk_register(struct nullb *nullb)
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+#ifdef CONFIG_BLK_DEV_ZONED
if (nullb->dev->zoned) {
- int ret = blk_revalidate_disk_zones(disk);
-
- if (ret != 0)
- return ret;
+ if (queue_is_mq(nullb->q)) {
+ int ret = blk_revalidate_disk_zones(disk);
+
+ if (ret)
+ return ret;
+ } else {
+ blk_queue_chunk_sectors(nullb->q,
+ nullb->dev->zone_size_sects);
+ nullb->q->nr_zones = blkdev_nr_zones(disk);
+ }
}
+#endif
add_disk(disk);
return 0;
@@ -1607,7 +1613,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
return blk_mq_alloc_tag_set(set);
}
-static void null_validate_conf(struct nullb_device *dev)
+static int null_validate_conf(struct nullb_device *dev)
{
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
@@ -1634,6 +1640,14 @@ static void null_validate_conf(struct nullb_device *dev)
/* can not stop a queue */
if (dev->queue_mode == NULL_Q_BIO)
dev->mbps = 0;
+
+ if (dev->zoned &&
+ (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
+ pr_err("zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
@@ -1666,7 +1680,9 @@ static int null_add_dev(struct nullb_device *dev)
struct nullb *nullb;
int rv;
- null_validate_conf(dev);
+ rv = null_validate_conf(dev);
+ if (rv)
+ return rv;
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
if (!nullb) {
@@ -1731,7 +1747,6 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_blk_queue;
- blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
nullb->q->limits.zoned = BLK_ZONED_HM;
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
blk_queue_required_elevator_features(nullb->q,
@@ -1792,11 +1807,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
- if (!is_power_of_2(g_zone_size)) {
- pr_err("zone_size must be power-of-two\n");
- return -EINVAL;
- }
-
if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
pr_err("invalid home_node value\n");
g_home_node = NUMA_NO_NODE;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 13527a0b4e44..2b184563cd32 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -34,7 +34,7 @@
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
#include <linux/bsearch.h>
#include <linux/kernel.h>
@@ -377,7 +377,6 @@ struct rbd_client_id {
struct rbd_mapping {
u64 size;
- u64 features;
};
/*
@@ -462,8 +461,9 @@ struct rbd_device {
* by rbd_dev->lock
*/
enum rbd_dev_flags {
- RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
+ RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -514,6 +514,16 @@ static int minor_to_rbd_dev_id(int minor)
return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
+static bool rbd_is_ro(struct rbd_device *rbd_dev)
+{
+ return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
+}
+
+static bool rbd_is_snap(struct rbd_device *rbd_dev)
+{
+ return rbd_dev->spec->snap_id != CEPH_NOSNAP;
+}
+
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
lockdep_assert_held(&rbd_dev->lock_rwsem);
@@ -633,8 +643,6 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
-static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
- u64 *snap_features);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
@@ -695,9 +703,16 @@ static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
if (get_user(ro, (int __user *)arg))
return -EFAULT;
- /* Snapshots can't be marked read-write */
- if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
- return -EROFS;
+ /*
+ * Neither images mapped read-only nor snapshots can be marked
+ * read-write.
+ */
+ if (!ro) {
+ if (rbd_is_ro(rbd_dev))
+ return -EROFS;
+
+ rbd_assert(!rbd_is_snap(rbd_dev));
+ }
/* Let blkdev_roset() handle it */
return -ENOTTY;
@@ -823,34 +838,34 @@ enum {
Opt_queue_depth,
Opt_alloc_size,
Opt_lock_timeout,
- Opt_last_int,
/* int args above */
Opt_pool_ns,
- Opt_last_string,
/* string args above */
Opt_read_only,
Opt_read_write,
Opt_lock_on_read,
Opt_exclusive,
Opt_notrim,
- Opt_err
};
-static match_table_t rbd_opts_tokens = {
- {Opt_queue_depth, "queue_depth=%d"},
- {Opt_alloc_size, "alloc_size=%d"},
- {Opt_lock_timeout, "lock_timeout=%d"},
- /* int args above */
- {Opt_pool_ns, "_pool_ns=%s"},
- /* string args above */
- {Opt_read_only, "read_only"},
- {Opt_read_only, "ro"}, /* Alternate spelling */
- {Opt_read_write, "read_write"},
- {Opt_read_write, "rw"}, /* Alternate spelling */
- {Opt_lock_on_read, "lock_on_read"},
- {Opt_exclusive, "exclusive"},
- {Opt_notrim, "notrim"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec rbd_param_specs[] = {
+ fsparam_u32 ("alloc_size", Opt_alloc_size),
+ fsparam_flag ("exclusive", Opt_exclusive),
+ fsparam_flag ("lock_on_read", Opt_lock_on_read),
+ fsparam_u32 ("lock_timeout", Opt_lock_timeout),
+ fsparam_flag ("notrim", Opt_notrim),
+ fsparam_string ("_pool_ns", Opt_pool_ns),
+ fsparam_u32 ("queue_depth", Opt_queue_depth),
+ fsparam_flag ("read_only", Opt_read_only),
+ fsparam_flag ("read_write", Opt_read_write),
+ fsparam_flag ("ro", Opt_read_only),
+ fsparam_flag ("rw", Opt_read_write),
+ {}
+};
+
+static const struct fs_parameter_description rbd_parameters = {
+ .name = "rbd",
+ .specs = rbd_param_specs,
};
struct rbd_options {
@@ -871,87 +886,12 @@ struct rbd_options {
#define RBD_EXCLUSIVE_DEFAULT false
#define RBD_TRIM_DEFAULT true
-struct parse_rbd_opts_ctx {
+struct rbd_parse_opts_ctx {
struct rbd_spec *spec;
+ struct ceph_options *copts;
struct rbd_options *opts;
};
-static int parse_rbd_opts_token(char *c, void *private)
-{
- struct parse_rbd_opts_ctx *pctx = private;
- substring_t argstr[MAX_OPT_ARGS];
- int token, intval, ret;
-
- token = match_token(c, rbd_opts_tokens, argstr);
- if (token < Opt_last_int) {
- ret = match_int(&argstr[0], &intval);
- if (ret < 0) {
- pr_err("bad option arg (not int) at '%s'\n", c);
- return ret;
- }
- dout("got int token %d val %d\n", token, intval);
- } else if (token > Opt_last_int && token < Opt_last_string) {
- dout("got string token %d val %s\n", token, argstr[0].from);
- } else {
- dout("got token %d\n", token);
- }
-
- switch (token) {
- case Opt_queue_depth:
- if (intval < 1) {
- pr_err("queue_depth out of range\n");
- return -EINVAL;
- }
- pctx->opts->queue_depth = intval;
- break;
- case Opt_alloc_size:
- if (intval < SECTOR_SIZE) {
- pr_err("alloc_size out of range\n");
- return -EINVAL;
- }
- if (!is_power_of_2(intval)) {
- pr_err("alloc_size must be a power of 2\n");
- return -EINVAL;
- }
- pctx->opts->alloc_size = intval;
- break;
- case Opt_lock_timeout:
- /* 0 is "wait forever" (i.e. infinite timeout) */
- if (intval < 0 || intval > INT_MAX / 1000) {
- pr_err("lock_timeout out of range\n");
- return -EINVAL;
- }
- pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
- break;
- case Opt_pool_ns:
- kfree(pctx->spec->pool_ns);
- pctx->spec->pool_ns = match_strdup(argstr);
- if (!pctx->spec->pool_ns)
- return -ENOMEM;
- break;
- case Opt_read_only:
- pctx->opts->read_only = true;
- break;
- case Opt_read_write:
- pctx->opts->read_only = false;
- break;
- case Opt_lock_on_read:
- pctx->opts->lock_on_read = true;
- break;
- case Opt_exclusive:
- pctx->opts->exclusive = true;
- break;
- case Opt_notrim:
- pctx->opts->trim = false;
- break;
- default:
- /* libceph prints "bad option" msg */
- return -EINVAL;
- }
-
- return 0;
-}
-
static char* obj_op_name(enum obj_operation_type op_type)
{
switch (op_type) {
@@ -1302,51 +1242,23 @@ static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
- u64 *snap_features)
-{
- rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
- if (snap_id == CEPH_NOSNAP) {
- *snap_features = rbd_dev->header.features;
- } else if (rbd_dev->image_format == 1) {
- *snap_features = 0; /* No features for format 1 */
- } else {
- u64 features = 0;
- int ret;
-
- ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
- if (ret)
- return ret;
-
- *snap_features = features;
- }
- return 0;
-}
-
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
u64 snap_id = rbd_dev->spec->snap_id;
u64 size = 0;
- u64 features = 0;
int ret;
ret = rbd_snap_size(rbd_dev, snap_id, &size);
if (ret)
return ret;
- ret = rbd_snap_features(rbd_dev, snap_id, &features);
- if (ret)
- return ret;
rbd_dev->mapping.size = size;
- rbd_dev->mapping.features = features;
-
return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
rbd_dev->mapping.size = 0;
- rbd_dev->mapping.features = 0;
}
static void zero_bvec(struct bio_vec *bv)
@@ -1832,6 +1744,17 @@ static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
static bool use_object_map(struct rbd_device *rbd_dev)
{
+ /*
+ * An image mapped read-only can't use the object map -- it isn't
+ * loaded because the header lock isn't acquired. Someone else can
+ * write to the image and update the object map behind our back.
+ *
+ * A snapshot can't be written to, so using the object map is always
+ * safe.
+ */
+ if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
+ return false;
+
return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}
@@ -3555,7 +3478,7 @@ static bool need_exclusive_lock(struct rbd_img_request *img_req)
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
return false;
- if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+ if (rbd_is_ro(rbd_dev))
return false;
rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
@@ -4230,7 +4153,7 @@ again:
* lock owner acked, but resend if we don't see them
* release the lock
*/
- dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
+ dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
rbd_dev);
mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
@@ -4826,24 +4749,14 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
- rbd_warn(rbd_dev, "%s on read-only snapshot",
- obj_op_name(op_type));
- result = -EIO;
- goto err;
- }
-
- /*
- * Quit early if the mapped snapshot no longer exists. It's
- * still possible the snapshot will have disappeared by the
- * time our request arrives at the osd, but there's no sense in
- * sending it if we already know.
- */
- if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
- dout("request for non-existent snapshot");
- rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
- result = -ENXIO;
- goto err_rq;
+ if (op_type != OBJ_OP_READ) {
+ if (rbd_is_ro(rbd_dev)) {
+ rbd_warn(rbd_dev, "%s on read-only mapping",
+ obj_op_name(op_type));
+ result = -EIO;
+ goto err;
+ }
+ rbd_assert(!rbd_is_snap(rbd_dev));
}
if (offset && length > U64_MAX - offset + 1) {
@@ -5025,25 +4938,6 @@ out:
return ret;
}
-/*
- * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
- * has disappeared from the (just updated) snapshot context.
- */
-static void rbd_exists_validate(struct rbd_device *rbd_dev)
-{
- u64 snap_id;
-
- if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
- return;
-
- snap_id = rbd_dev->spec->snap_id;
- if (snap_id == CEPH_NOSNAP)
- return;
-
- if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
- clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-}
-
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
@@ -5084,12 +4978,8 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
goto out;
}
- if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
- rbd_dev->mapping.size = rbd_dev->header.image_size;
- } else {
- /* validate mapped snapshot's EXISTS flag */
- rbd_exists_validate(rbd_dev);
- }
+ rbd_assert(!rbd_is_snap(rbd_dev));
+ rbd_dev->mapping.size = rbd_dev->header.image_size;
out:
up_write(&rbd_dev->header_rwsem);
@@ -5211,17 +5101,12 @@ static ssize_t rbd_size_show(struct device *dev,
(unsigned long long)rbd_dev->mapping.size);
}
-/*
- * Note this shows the features for whatever's mapped, which is not
- * necessarily the base image.
- */
static ssize_t rbd_features_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "0x%016llx\n",
- (unsigned long long)rbd_dev->mapping.features);
+ return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
}
static ssize_t rbd_major_show(struct device *dev,
@@ -5709,9 +5594,12 @@ out:
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
- u64 *snap_features)
+ bool read_only, u64 *snap_features)
{
- __le64 snapid = cpu_to_le64(snap_id);
+ struct {
+ __le64 snap_id;
+ u8 read_only;
+ } features_in;
struct {
__le64 features;
__le64 incompat;
@@ -5719,9 +5607,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 unsup;
int ret;
+ features_in.snap_id = cpu_to_le64(snap_id);
+ features_in.read_only = read_only;
+
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_features",
- &snapid, sizeof(snapid),
+ &features_in, sizeof(features_in),
&features_buf, sizeof(features_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
@@ -5749,7 +5640,8 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
- &rbd_dev->header.features);
+ rbd_is_ro(rbd_dev),
+ &rbd_dev->header.features);
}
/*
@@ -6456,6 +6348,122 @@ static inline char *dup_token(const char **buf, size_t *lenp)
return dup;
}
+static int rbd_parse_param(struct fs_parameter *param,
+ struct rbd_parse_opts_ctx *pctx)
+{
+ struct rbd_options *opt = pctx->opts;
+ struct fs_parse_result result;
+ int token, ret;
+
+ ret = ceph_parse_param(param, pctx->copts, NULL);
+ if (ret != -ENOPARAM)
+ return ret;
+
+ token = fs_parse(NULL, &rbd_parameters, param, &result);
+ dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
+ if (token < 0) {
+ if (token == -ENOPARAM) {
+ return invalf(NULL, "rbd: Unknown parameter '%s'",
+ param->key);
+ }
+ return token;
+ }
+
+ switch (token) {
+ case Opt_queue_depth:
+ if (result.uint_32 < 1)
+ goto out_of_range;
+ opt->queue_depth = result.uint_32;
+ break;
+ case Opt_alloc_size:
+ if (result.uint_32 < SECTOR_SIZE)
+ goto out_of_range;
+ if (!is_power_of_2(result.uint_32))
+ return invalf(NULL, "rbd: alloc_size must be a power of 2");
+ opt->alloc_size = result.uint_32;
+ break;
+ case Opt_lock_timeout:
+ /* 0 is "wait forever" (i.e. infinite timeout) */
+ if (result.uint_32 > INT_MAX / 1000)
+ goto out_of_range;
+ opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
+ break;
+ case Opt_pool_ns:
+ kfree(pctx->spec->pool_ns);
+ pctx->spec->pool_ns = param->string;
+ param->string = NULL;
+ break;
+ case Opt_read_only:
+ opt->read_only = true;
+ break;
+ case Opt_read_write:
+ opt->read_only = false;
+ break;
+ case Opt_lock_on_read:
+ opt->lock_on_read = true;
+ break;
+ case Opt_exclusive:
+ opt->exclusive = true;
+ break;
+ case Opt_notrim:
+ opt->trim = false;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+
+out_of_range:
+ return invalf(NULL, "rbd: %s out of range", param->key);
+}
+
+/*
+ * This duplicates most of generic_parse_monolithic(), untying it from
+ * fs_context and skipping standard superblock and security options.
+ */
+static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
+{
+ char *key;
+ int ret = 0;
+
+ dout("%s '%s'\n", __func__, options);
+ while ((key = strsep(&options, ",")) != NULL) {
+ if (*key) {
+ struct fs_parameter param = {
+ .key = key,
+ .type = fs_value_is_string,
+ };
+ char *value = strchr(key, '=');
+ size_t v_len = 0;
+
+ if (value) {
+ if (value == key)
+ continue;
+ *value++ = 0;
+ v_len = strlen(value);
+ }
+
+ if (v_len > 0) {
+ param.string = kmemdup_nul(value, v_len,
+ GFP_KERNEL);
+ if (!param.string)
+ return -ENOMEM;
+ }
+ param.size = v_len;
+
+ ret = rbd_parse_param(&param, pctx);
+ kfree(param.string);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
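/*
 * Illustrative sketch, not part of this patch: feeding a comma-separated
 * option string through the parser above. The function name and option
 * values are hypothetical; rbd_parse_options() modifies the string in
 * place via strsep(), hence the writable array.
 */
static int example_parse_opts(struct rbd_parse_opts_ctx *pctx)
{
	char opts[] = "queue_depth=128,alloc_size=65536,read_only";

	return rbd_parse_options(opts, pctx);
}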
/*
* Parse the options provided for an "rbd add" (i.e., rbd image
* mapping) request. These arrive via a write to /sys/bus/rbd/add,
@@ -6507,8 +6515,7 @@ static int rbd_add_parse_args(const char *buf,
const char *mon_addrs;
char *snap_name;
size_t mon_addrs_size;
- struct parse_rbd_opts_ctx pctx = { 0 };
- struct ceph_options *copts;
+ struct rbd_parse_opts_ctx pctx = { 0 };
int ret;
/* The first four tokens are required */
@@ -6519,7 +6526,7 @@ static int rbd_add_parse_args(const char *buf,
return -EINVAL;
}
mon_addrs = buf;
- mon_addrs_size = len + 1;
+ mon_addrs_size = len;
buf += len;
ret = -EINVAL;
@@ -6569,6 +6576,10 @@ static int rbd_add_parse_args(const char *buf,
*(snap_name + len) = '\0';
pctx.spec->snap_name = snap_name;
+ pctx.copts = ceph_alloc_options();
+ if (!pctx.copts)
+ goto out_mem;
+
/* Initialize all rbd options to the defaults */
pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
@@ -6583,27 +6594,27 @@ static int rbd_add_parse_args(const char *buf,
pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
pctx.opts->trim = RBD_TRIM_DEFAULT;
- copts = ceph_parse_options(options, mon_addrs,
- mon_addrs + mon_addrs_size - 1,
- parse_rbd_opts_token, &pctx);
- if (IS_ERR(copts)) {
- ret = PTR_ERR(copts);
+ ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = rbd_parse_options(options, &pctx);
+ if (ret)
goto out_err;
- }
- kfree(options);
- *ceph_opts = copts;
+ *ceph_opts = pctx.copts;
*opts = pctx.opts;
*rbd_spec = pctx.spec;
-
+ kfree(options);
return 0;
+
out_mem:
ret = -ENOMEM;
out_err:
kfree(pctx.opts);
+ ceph_destroy_options(pctx.copts);
rbd_spec_put(pctx.spec);
kfree(options);
-
return ret;
}
@@ -6632,7 +6643,7 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
return -EINVAL;
}
- if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+ if (rbd_is_ro(rbd_dev))
return 0;
rbd_assert(!rbd_is_lock_owner(rbd_dev));
@@ -6838,6 +6849,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
__rbd_get_client(rbd_dev->rbd_client);
rbd_spec_get(rbd_dev->parent_spec);
+ __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
+
ret = rbd_dev_image_probe(parent, depth);
if (ret < 0)
goto out_err;
@@ -6889,7 +6902,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
goto err_out_blkdev;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
- set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
+ set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
@@ -6927,6 +6940,24 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
return ret;
}
+static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
+{
+ if (!is_snap) {
+ pr_info("image %s/%s%s%s does not exist\n",
+ rbd_dev->spec->pool_name,
+ rbd_dev->spec->pool_ns ?: "",
+ rbd_dev->spec->pool_ns ? "/" : "",
+ rbd_dev->spec->image_name);
+ } else {
+ pr_info("snap %s/%s%s%s@%s does not exist\n",
+ rbd_dev->spec->pool_name,
+ rbd_dev->spec->pool_ns ?: "",
+ rbd_dev->spec->pool_ns ? "/" : "",
+ rbd_dev->spec->image_name,
+ rbd_dev->spec->snap_name);
+ }
+}
+
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
@@ -6945,6 +6976,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
*/
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
+ bool need_watch = !rbd_is_ro(rbd_dev);
int ret;
/*
@@ -6961,22 +6993,21 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (ret)
goto err_out_format;
- if (!depth) {
+ if (need_watch) {
ret = rbd_register_watch(rbd_dev);
if (ret) {
if (ret == -ENOENT)
- pr_info("image %s/%s%s%s does not exist\n",
- rbd_dev->spec->pool_name,
- rbd_dev->spec->pool_ns ?: "",
- rbd_dev->spec->pool_ns ? "/" : "",
- rbd_dev->spec->image_name);
+ rbd_print_dne(rbd_dev, false);
goto err_out_format;
}
}
ret = rbd_dev_header_info(rbd_dev);
- if (ret)
+ if (ret) {
+ if (ret == -ENOENT && !need_watch)
+ rbd_print_dne(rbd_dev, false);
goto err_out_watch;
+ }
/*
* If this image is the one being mapped, we have pool name and
@@ -6990,12 +7021,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
ret = rbd_spec_fill_names(rbd_dev);
if (ret) {
if (ret == -ENOENT)
- pr_info("snap %s/%s%s%s@%s does not exist\n",
- rbd_dev->spec->pool_name,
- rbd_dev->spec->pool_ns ?: "",
- rbd_dev->spec->pool_ns ? "/" : "",
- rbd_dev->spec->image_name,
- rbd_dev->spec->snap_name);
+ rbd_print_dne(rbd_dev, true);
goto err_out_probe;
}
@@ -7003,7 +7029,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (ret)
goto err_out_probe;
- if (rbd_dev->spec->snap_id != CEPH_NOSNAP &&
+ if (rbd_is_snap(rbd_dev) &&
(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
ret = rbd_object_map_load(rbd_dev);
if (ret)
@@ -7027,7 +7053,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
- if (!depth)
+ if (need_watch)
rbd_unregister_watch(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
@@ -7079,6 +7105,11 @@ static ssize_t do_rbd_add(struct bus_type *bus,
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ /* if we are mapping a snapshot it will be a read-only mapping */
+ if (rbd_dev->opts->read_only ||
+ strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
+ __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
+
rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
if (!rbd_dev->config_info) {
rc = -ENOMEM;
@@ -7092,10 +7123,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
goto err_out_rbd_dev;
}
- /* If we are mapping a snapshot it must be marked read-only */
- if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
- rbd_dev->opts->read_only = true;
-
if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
rbd_warn(rbd_dev, "alloc_size adjusted to %u",
rbd_dev->layout.object_size);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index fd1e19f1a49f..716b99aa2307 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -936,6 +936,8 @@ next:
out_of_memory:
pr_alert("%s: out of memory\n", __func__);
put_free_pages(ring, pages_to_gnt, segs_to_map);
+ for (i = last_map; i < num; i++)
+ pages[i]->handle = BLKBACK_INVALID_HANDLE;
return -ENOMEM;
}
@@ -1504,5 +1506,13 @@ static int __init xen_blkif_init(void)
module_init(xen_blkif_init);
+static void __exit xen_blkif_fini(void)
+{
+ xen_blkif_xenbus_fini();
+ xen_blkif_interface_fini();
+}
+
+module_exit(xen_blkif_fini);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 1d3002d773f7..49132b0adbbe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -375,9 +375,12 @@ struct phys_req {
struct block_device *bdev;
blkif_sector_t sector_number;
};
+
int xen_blkif_interface_init(void);
+void xen_blkif_interface_fini(void);
int xen_blkif_xenbus_init(void);
+void xen_blkif_xenbus_fini(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index b90dbcd99c03..e8c5c54e1d26 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -333,6 +333,12 @@ int __init xen_blkif_interface_init(void)
return 0;
}
+void xen_blkif_interface_fini(void)
+{
+ kmem_cache_destroy(xen_blkif_cachep);
+ xen_blkif_cachep = NULL;
+}
+
/*
* sysfs interface for VBD I/O requests
*/
@@ -1122,3 +1128,8 @@ int xen_blkif_xenbus_init(void)
{
return xenbus_register_backend(&xen_blkbk_driver);
}
+
+void xen_blkif_xenbus_fini(void)
+{
+ xenbus_unregister_driver(&xen_blkbk_driver);
+}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 97ab5ad171d4..50200d1c06ea 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -41,8 +41,9 @@ config MOXTET
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
- depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
- select INDIRECT_PIO
+ depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC && !C6X)
+ depends on HAS_IOMEM
+ select INDIRECT_PIO if ARM64
help
Driver to enable I/O access to devices attached to the Low Pin
Count bus on the HiSilicon Hip06/7 SoC.
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 20c957185af2..8101df901830 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -74,7 +74,7 @@ struct hisi_lpc_dev {
/* About 10us. This is specific for single IO operations, such as inb */
#define LPC_PEROP_WAITCNT 100
-static int wait_lpc_idle(unsigned char *mbase, unsigned int waitcnt)
+static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
{
u32 status;
@@ -209,7 +209,7 @@ static u32 hisi_lpc_comm_in(void *hostdata, unsigned long pio, size_t dwidth)
struct hisi_lpc_dev *lpcdev = hostdata;
struct lpc_cycle_para iopara;
unsigned long addr;
- u32 rd_data = 0;
+ __le32 rd_data = 0;
int ret;
if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
@@ -244,13 +244,12 @@ static void hisi_lpc_comm_out(void *hostdata, unsigned long pio,
struct lpc_cycle_para iopara;
const unsigned char *buf;
unsigned long addr;
+ __le32 _val = cpu_to_le32(val);
if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
return;
- val = cpu_to_le32(val);
-
- buf = (const unsigned char *)&val;
+ buf = (const unsigned char *)&_val;
addr = hisi_lpc_pio_to_addr(lpcdev, pio);
iopara.opflags = FG_INCRADDR_LPC;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 2b6670daf7fc..56887c6877a7 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -917,6 +917,9 @@ set_midle:
return -EINVAL;
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+ best_mode = SYSC_IDLE_NO;
+
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
@@ -978,6 +981,9 @@ static int sysc_disable_module(struct device *dev)
return ret;
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+ best_mode = SYSC_IDLE_FORCE;
+
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
@@ -1037,8 +1043,6 @@ static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
struct ti_sysc_platform_data *pdata;
int error;
- reset_control_deassert(ddata->rsts);
-
pdata = dev_get_platdata(ddata->dev);
if (!pdata)
return 0;
@@ -1051,6 +1055,8 @@ static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
dev_err(dev, "%s: could not enable: %i\n",
__func__, error);
+ reset_control_deassert(ddata->rsts);
+
return 0;
}
@@ -1104,8 +1110,6 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
sysc_clkdm_deny_idle(ddata);
- reset_control_deassert(ddata->rsts);
-
if (sysc_opt_clks_needed(ddata)) {
error = sysc_enable_opt_clocks(ddata);
if (error)
@@ -1116,6 +1120,8 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
if (error)
goto err_opt_clocks;
+ reset_control_deassert(ddata->rsts);
+
if (ddata->legacy_mode) {
error = sysc_runtime_resume_legacy(dev, ddata);
if (error)
@@ -1236,6 +1242,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE),
/* Quirks that need to be set based on detected module */
+ SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
+ SYSC_MODULE_QUIRK_AESS),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
SYSC_MODULE_QUIRK_HDQ1W),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
@@ -1251,6 +1259,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
SYSC_MODULE_QUIRK_SGX),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* Watchdog on am3 and am4 */
@@ -1260,7 +1272,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
- SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, 0),
SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0),
SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
@@ -1309,8 +1320,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
- SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
- 0xffffffff, 0),
SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
#endif
};
@@ -1394,6 +1403,14 @@ static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
sysc_write(ddata, offset, val);
}
+/* AESS (Audio Engine SubSystem) needs autogating set after enable */
+static void sysc_module_enable_quirk_aess(struct sysc *ddata)
+{
+ int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */
+
+ sysc_write(ddata, offset, 1);
+}
+
/* I2C needs extra enable bit toggling for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
@@ -1476,6 +1493,9 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
+ ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -1532,37 +1552,6 @@ static int sysc_legacy_init(struct sysc *ddata)
return error;
}
-/**
- * sysc_rstctrl_reset_deassert - deassert rstctrl reset
- * @ddata: device driver data
- * @reset: reset before deassert
- *
- * A module can have both OCP softreset control and external rstctrl.
- * If more complicated rstctrl resets are needed, please handle these
- * directly from the child device driver and map only the module reset
- * for the parent interconnect target module device.
- *
- * Automatic reset of the module on init can be skipped with the
- * "ti,no-reset-on-init" device tree property.
- */
-static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
-{
- int error;
-
- if (!ddata->rsts)
- return 0;
-
- if (reset) {
- error = reset_control_assert(ddata->rsts);
- if (error)
- return error;
- }
-
- reset_control_deassert(ddata->rsts);
-
- return 0;
-}
-
/*
* Note that the caller must ensure the interconnect target module is enabled
* before calling reset. Otherwise reset will not complete.
@@ -1625,15 +1614,6 @@ static int sysc_reset(struct sysc *ddata)
static int sysc_init_module(struct sysc *ddata)
{
int error = 0;
- bool manage_clocks = true;
-
- error = sysc_rstctrl_reset_deassert(ddata, false);
- if (error)
- return error;
-
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
- manage_clocks = false;
error = sysc_clockdomain_init(ddata);
if (error)
@@ -1654,7 +1634,7 @@ static int sysc_init_module(struct sysc *ddata)
goto err_opt_clocks;
if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
- error = sysc_rstctrl_reset_deassert(ddata, true);
+ error = reset_control_deassert(ddata->rsts);
if (error)
goto err_main_clocks;
}
@@ -1666,28 +1646,32 @@ static int sysc_init_module(struct sysc *ddata)
if (ddata->legacy_mode) {
error = sysc_legacy_init(ddata);
if (error)
- goto err_main_clocks;
+ goto err_reset;
}
if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
- goto err_main_clocks;
+ goto err_reset;
}
error = sysc_reset(ddata);
if (error)
dev_err(ddata->dev, "Reset failed with %d\n", error);
- if (!ddata->legacy_mode && manage_clocks)
+ if (error && !ddata->legacy_mode)
sysc_disable_module(ddata->dev);
+err_reset:
+ if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
err_main_clocks:
- if (manage_clocks)
+ if (error)
sysc_disable_main_clocks(ddata);
err_opt_clocks:
/* No re-enable of clockdomain autoidle to prevent module autoidle */
- if (manage_clocks) {
+ if (error) {
sysc_disable_opt_clocks(ddata);
sysc_clkdm_allow_idle(ddata);
}
@@ -1794,9 +1778,8 @@ static int sysc_child_add_named_clock(struct sysc *ddata,
clk = clk_get(child, name);
if (!IS_ERR(clk)) {
- clk_put(clk);
-
- return -EEXIST;
+ error = -EEXIST;
+ goto put_clk;
}
clk = clk_get(ddata->dev, name);
@@ -1806,7 +1789,7 @@ static int sysc_child_add_named_clock(struct sysc *ddata,
l = clkdev_create(clk, name, dev_name(child));
if (!l)
error = -ENOMEM;
-
+put_clk:
clk_put(clk);
return error;
@@ -2460,10 +2443,17 @@ static int sysc_probe(struct platform_device *pdev)
goto unprepare;
}
- /* Balance reset counts */
- if (ddata->rsts)
+ /* Balance the use counts, as PM runtime should have enabled all of these */
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
reset_control_assert(ddata->rsts);
+ if (!(ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
+ sysc_disable_main_clocks(ddata);
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index f6955888e676..47098648502d 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -102,14 +102,13 @@ agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
int size, pgprot_t page_prot)
{
struct agp_segment_priv *seg;
- int num_segments, i;
+ int i;
off_t pg_start;
size_t pg_count;
pg_start = offset / 4096;
pg_count = size / 4096;
seg = *(client->segments);
- num_segments = client->num_segments;
for (i = 0; i < client->num_segments; i++) {
if ((seg[i].pg_start == pg_start) &&
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index df1edb5ec0ad..ab154a75acf0 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -207,6 +207,7 @@ EXPORT_SYMBOL(agp_free_memory);
/**
* agp_allocate_memory - allocate a group of pages of a certain type.
*
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
* @page_count: size_t argument of the number of pages
* @type: u32 argument of the type of memory to be allocated.
*
@@ -355,6 +356,7 @@ EXPORT_SYMBOL_GPL(agp_num_entries);
/**
* agp_copy_info - copy bridge state information
*
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
* @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
*
* This function copies information about the agp bridge device and the state of
@@ -850,7 +852,6 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
char *table;
char *table_end;
- int size;
int page_order;
int num_entries;
int i;
@@ -864,25 +865,22 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
table = NULL;
i = bridge->aperture_size_idx;
temp = bridge->current_size;
- size = page_order = num_entries = 0;
+ page_order = num_entries = 0;
if (bridge->driver->size_type != FIXED_APER_SIZE) {
do {
switch (bridge->driver->size_type) {
case U8_APER_SIZE:
- size = A_SIZE_8(temp)->size;
page_order =
A_SIZE_8(temp)->page_order;
num_entries =
A_SIZE_8(temp)->num_entries;
break;
case U16_APER_SIZE:
- size = A_SIZE_16(temp)->size;
page_order = A_SIZE_16(temp)->page_order;
num_entries = A_SIZE_16(temp)->num_entries;
break;
case U32_APER_SIZE:
- size = A_SIZE_32(temp)->size;
page_order = A_SIZE_32(temp)->page_order;
num_entries = A_SIZE_32(temp)->num_entries;
break;
@@ -890,7 +888,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
case FIXED_APER_SIZE:
case LVL2_APER_SIZE:
default:
- size = page_order = num_entries = 0;
+ page_order = num_entries = 0;
break;
}
@@ -920,7 +918,6 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
}
} while (!table && (i < bridge->driver->num_aperture_sizes));
} else {
- size = ((struct aper_size_info_fixed *) temp)->size;
page_order = ((struct aper_size_info_fixed *) temp)->page_order;
num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
table = alloc_gatt_pages(page_order);
@@ -1282,6 +1279,7 @@ EXPORT_SYMBOL(agp_generic_destroy_page);
/**
* agp_enable - initialise the agp point-to-point connection.
*
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
* @mode: agp mode register value to configure with.
*/
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index dc920daa6dbb..45653a0e6ecd 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -299,6 +299,11 @@ config COMMON_CLK_STM32H7
help
Support for stm32h7 SoC family clocks
+config COMMON_CLK_MMP2
+ def_bool COMMON_CLK && (MACH_MMP2_DT || MACH_MMP3_DT)
+ help
+ Support for Marvell MMP2 and MMP3 SoC clocks
+
config COMMON_CLK_BD718XX
tristate "Clock driver for ROHM BD718x7 PMIC"
depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 7bc7ac69391e..acc141adf087 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -8,7 +8,7 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
-obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o
+obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o
obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 2dbbe47e8d4f..7ed313ad6e43 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -500,7 +500,7 @@ static int __init clk_rpmh_init(void)
{
return platform_driver_register(&clk_rpmh_driver);
}
-subsys_initcall(clk_rpmh_init);
+core_initcall(clk_rpmh_init);
static void __exit clk_rpmh_exit(void)
{
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index bd32212f37e6..9b0c4ce2ef4e 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -2855,7 +2855,7 @@ static int __init gcc_qcs404_init(void)
{
return platform_driver_register(&gcc_qcs404_driver);
}
-subsys_initcall(gcc_qcs404_init);
+core_initcall(gcc_qcs404_init);
static void __exit gcc_qcs404_exit(void)
{
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index d2142fe46a8e..f7b370f3acef 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3628,7 +3628,7 @@ static int __init gcc_sdm845_init(void)
{
return platform_driver_register(&gcc_sdm845_driver);
}
-subsys_initcall(gcc_sdm845_init);
+core_initcall(gcc_sdm845_init);
static void __exit gcc_sdm845_exit(void)
{
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f35a53ce8988..5fdd76cb1768 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -528,6 +528,7 @@ config SH_TIMER_MTU2
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
select CLKSRC_MMIO
+ select TIMER_OF
help
Enables the support for the Renesas OSTM.
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 9f09a59161e7..5b39d3701fa3 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}
clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clk!\n");
+ return PTR_ERR(clk);
+ }
ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 37c39b901bb1..3d06ba66008c 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -6,14 +6,14 @@
* Copyright (C) 2017 Chris Brandt
*/
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
+#include "timer-of.h"
+
/*
* The OSTM contains independent channels.
* The first OSTM channel probed will be set up as a free running
@@ -24,12 +24,6 @@
* driven clock event.
*/
-struct ostm_device {
- void __iomem *base;
- unsigned long ticks_per_jiffy;
- struct clock_event_device ced;
-};
-
static void __iomem *system_clock; /* For sched_clock() */
/* OSTM REGISTERS */
@@ -47,41 +41,32 @@ static void __iomem *system_clock; /* For sched_clock() */
#define CTL_ONESHOT 0x02
#define CTL_FREERUN 0x02
-static struct ostm_device *ced_to_ostm(struct clock_event_device *ced)
-{
- return container_of(ced, struct ostm_device, ced);
-}
-
-static void ostm_timer_stop(struct ostm_device *ostm)
+static void ostm_timer_stop(struct timer_of *to)
{
- if (readb(ostm->base + OSTM_TE) & TE) {
- writeb(TT, ostm->base + OSTM_TT);
+ if (readb(timer_of_base(to) + OSTM_TE) & TE) {
+ writeb(TT, timer_of_base(to) + OSTM_TT);
/*
* Read back the register simply to confirm the write operation
* has completed since I/O writes can sometimes get queued by
* the bus architecture.
*/
- while (readb(ostm->base + OSTM_TE) & TE)
+ while (readb(timer_of_base(to) + OSTM_TE) & TE)
;
}
}
-static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate)
+static int __init ostm_init_clksrc(struct timer_of *to)
{
- /*
- * irq not used (clock sources don't use interrupts)
- */
-
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(0, ostm->base + OSTM_CMP);
- writeb(CTL_FREERUN, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(0, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_FREERUN, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
- return clocksource_mmio_init(ostm->base + OSTM_CNT,
- "ostm", rate,
- 300, 32, clocksource_mmio_readl_up);
+ return clocksource_mmio_init(timer_of_base(to) + OSTM_CNT,
+ to->np->full_name, timer_of_rate(to), 300,
+ 32, clocksource_mmio_readl_up);
}
static u64 notrace ostm_read_sched_clock(void)
@@ -89,87 +74,75 @@ static u64 notrace ostm_read_sched_clock(void)
return readl(system_clock);
}
-static void __init ostm_init_sched_clock(struct ostm_device *ostm,
- unsigned long rate)
+static void __init ostm_init_sched_clock(struct timer_of *to)
{
- system_clock = ostm->base + OSTM_CNT;
- sched_clock_register(ostm_read_sched_clock, 32, rate);
+ system_clock = timer_of_base(to) + OSTM_CNT;
+ sched_clock_register(ostm_read_sched_clock, 32, timer_of_rate(to));
}
static int ostm_clock_event_next(unsigned long delta,
- struct clock_event_device *ced)
+ struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(delta, ostm->base + OSTM_CMP);
- writeb(CTL_ONESHOT, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(delta, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_ONESHOT, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_shutdown(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
return 0;
}
static int ostm_set_periodic(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP);
- writeb(CTL_PERIODIC, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(timer_of_period(to) - 1, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_PERIODIC, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_set_oneshot(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
return 0;
}
static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
{
- struct ostm_device *ostm = dev_id;
+ struct clock_event_device *ced = dev_id;
- if (clockevent_state_oneshot(&ostm->ced))
- ostm_timer_stop(ostm);
+ if (clockevent_state_oneshot(ced))
+ ostm_timer_stop(to_timer_of(ced));
/* notify clockevent layer */
- if (ostm->ced.event_handler)
- ostm->ced.event_handler(&ostm->ced);
+ if (ced->event_handler)
+ ced->event_handler(ced);
return IRQ_HANDLED;
}
-static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
- unsigned long rate)
+static int __init ostm_init_clkevt(struct timer_of *to)
{
- struct clock_event_device *ced = &ostm->ced;
- int ret = -ENXIO;
-
- ret = request_irq(irq, ostm_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL,
- "ostm", ostm);
- if (ret) {
- pr_err("ostm: failed to request irq\n");
- return ret;
- }
+ struct clock_event_device *ced = &to->clkevt;
- ced->name = "ostm";
ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
ced->set_state_shutdown = ostm_shutdown;
ced->set_state_periodic = ostm_set_periodic;
@@ -178,79 +151,61 @@ static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
ced->shift = 32;
ced->rating = 300;
ced->cpumask = cpumask_of(0);
- clockevents_config_and_register(ced, rate, 0xf, 0xffffffff);
+ clockevents_config_and_register(ced, timer_of_rate(to), 0xf,
+ 0xffffffff);
return 0;
}
static int __init ostm_init(struct device_node *np)
{
- struct ostm_device *ostm;
- int ret = -EFAULT;
- struct clk *ostm_clk = NULL;
- int irq;
- unsigned long rate;
-
- ostm = kzalloc(sizeof(*ostm), GFP_KERNEL);
- if (!ostm)
- return -ENOMEM;
-
- ostm->base = of_iomap(np, 0);
- if (!ostm->base) {
- pr_err("ostm: failed to remap I/O memory\n");
- goto err;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq < 0) {
- pr_err("ostm: Failed to get irq\n");
- goto err;
- }
+ struct timer_of *to;
+ int ret;
- ostm_clk = of_clk_get(np, 0);
- if (IS_ERR(ostm_clk)) {
- pr_err("ostm: Failed to get clock\n");
- ostm_clk = NULL;
- goto err;
- }
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- ret = clk_prepare_enable(ostm_clk);
- if (ret) {
- pr_err("ostm: Failed to enable clock\n");
- goto err;
+ to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+ if (system_clock) {
+ /*
+ * clock sources don't use interrupts; clock events do
+ */
+ to->flags |= TIMER_OF_IRQ;
+ to->of_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;
+ to->of_irq.handler = ostm_timer_interrupt;
}
- rate = clk_get_rate(ostm_clk);
- ostm->ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+ ret = timer_of_init(np, to);
+ if (ret)
+ goto err_free;
/*
* First probed device will be used as system clocksource. Any
* additional devices will be used as clock events.
*/
if (!system_clock) {
- ret = ostm_init_clksrc(ostm, rate);
-
- if (!ret) {
- ostm_init_sched_clock(ostm, rate);
- pr_info("ostm: used for clocksource\n");
- }
+ ret = ostm_init_clksrc(to);
+ if (ret)
+ goto err_cleanup;
+ ostm_init_sched_clock(to);
+ pr_info("%pOF: used for clocksource\n", np);
} else {
- ret = ostm_init_clkevt(ostm, irq, rate);
+ ret = ostm_init_clkevt(to);
+ if (ret)
+ goto err_cleanup;
- if (!ret)
- pr_info("ostm: used for clock events\n");
- }
-
-err:
- if (ret) {
- clk_disable_unprepare(ostm_clk);
- iounmap(ostm->base);
- kfree(ostm);
- return ret;
+ pr_info("%pOF: used for clock events\n", np);
}
return 0;
+
+err_cleanup:
+ timer_of_cleanup(to);
+err_free:
+ kfree(to);
+ return ret;
}
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 11ff701ff4bb..572da477c6d3 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -57,8 +57,8 @@ static __init int timer_of_irq_init(struct device_node *np,
if (of_irq->name) {
of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
if (ret < 0) {
- pr_err("Failed to get interrupt %s for %s\n",
- of_irq->name, np->full_name);
+ pr_err("Failed to get interrupt %s for %pOF\n",
+ of_irq->name, np);
return ret;
}
} else {
@@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
}
if (!to->clkevt.name)
- to->clkevt.name = np->name;
+ to->clkevt.name = np->full_name;
to->np = np;
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 35b4f700f054..58151ca56695 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -48,9 +48,9 @@ config PPC_PASEMI_CPUFREQ
PWRficient processors.
config POWERNV_CPUFREQ
- tristate "CPU frequency scaling for IBM POWERNV platform"
- depends on PPC_POWERNV
- default y
- help
+ tristate "CPU frequency scaling for IBM POWERNV platform"
+ depends on PPC_POWERNV
+ default y
+ help
This adds support for CPU frequency switching on the IBM POWERNV
platform.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index dfa6457deaf6..a6528388952e 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -4,17 +4,17 @@
#
config X86_INTEL_PSTATE
- bool "Intel P state control"
- depends on X86
- select ACPI_PROCESSOR if ACPI
- select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
- help
- This driver provides a P state for Intel core processors.
+ bool "Intel P state control"
+ depends on X86
+ select ACPI_PROCESSOR if ACPI
+ select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+ help
+ This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
- the scaling driver and governor for Sandy bridge processors.
+ the scaling driver and governor for Sandy Bridge processors.
When this driver is enabled it will become the preferred
- scaling driver for Sandy bridge processors.
+ scaling driver for Sandy Bridge processors.
If in doubt, say N.
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 54bc76743b1f..f1d170dcf4d3 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -180,4 +180,4 @@ create_pdev:
-1, data,
sizeof(struct cpufreq_dt_platform_data)));
}
-device_initcall(cpufreq_dt_platdev_init);
+core_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b66e81c06a57..737ff3b9c2c0 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -346,7 +346,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
return CPU_FREQ_GOV_CONSERVATIVE;
}
-fs_initcall(cpufreq_gov_dbs_init);
+core_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index dced033875bf..82a4d37ddecb 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -483,7 +483,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
return CPU_FREQ_GOV_ONDEMAND;
}
-fs_initcall(cpufreq_gov_dbs_init);
+core_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index aaa04dfcacd9..def9afe0f5b8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -50,5 +50,5 @@ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
MODULE_LICENSE("GPL");
-fs_initcall(cpufreq_gov_performance_init);
+core_initcall(cpufreq_gov_performance_init);
module_exit(cpufreq_gov_performance_exit);
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index c143dc237d87..1ae66019eb83 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -43,7 +43,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
return &cpufreq_gov_powersave;
}
-fs_initcall(cpufreq_gov_powersave_init);
+core_initcall(cpufreq_gov_powersave_init);
#else
module_init(cpufreq_gov_powersave_init);
#endif
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cbd81c58cb8f..b43e7cd502c5 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -147,7 +147,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
return &cpufreq_gov_userspace;
}
-fs_initcall(cpufreq_gov_userspace_init);
+core_initcall(cpufreq_gov_userspace_init);
#else
module_init(cpufreq_gov_userspace_init);
#endif
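
All of the governor hunks above move registration one initcall level earlier, so the default governor is already registered before cpufreq drivers probe. A minimal sketch (not from this patch; example_gov_init is a hypothetical name) of how initcall levels order execution:

#include <linux/init.h>

/*
 * core_initcall() is level 1; fs_initcall() is level 5 and
 * device_initcall() is level 6, so this runs before either.
 */
static int __init example_gov_init(void)
{
	return 0;	/* a real governor registers itself here */
}
core_initcall(example_gov_init);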
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index a9ae2f84a4ef..fc92a8842e25 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -334,7 +334,7 @@ static int __init qcom_cpufreq_hw_init(void)
{
return platform_driver_register(&qcom_cpufreq_hw_driver);
}
-device_initcall(qcom_cpufreq_hw_init);
+postcore_initcall(qcom_cpufreq_hw_init);
static void __exit qcom_cpufreq_hw_exit(void)
{
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 4f0c637b3b49..7a1ea6fdcab6 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -128,8 +129,66 @@ out_put_np:
return ret;
}
+static int __maybe_unused tegra124_cpufreq_suspend(struct device *dev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(dev);
+ int err;
+
+ /*
+ * PLLP rate 408MHz is below the CPU Fmax at Vmin and is safe to
+ * use during suspend and resume. So, switch the CPU clock source
+ * to PLLP and disable DFLL.
+ */
+ err = clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to reparent to PLLP: %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(priv->dfll_clk);
+
+ return 0;
+}
+
+static int __maybe_unused tegra124_cpufreq_resume(struct device *dev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(dev);
+ int err;
+
+ /*
+ * Warmboot code powers up the CPU with PLLP clock source.
+ * Enable DFLL clock and switch CPU clock source back to DFLL.
+ */
+ err = clk_prepare_enable(priv->dfll_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable DFLL clock for CPU: %d\n", err);
+ goto disable_cpufreq;
+ }
+
+ err = clk_set_parent(priv->cpu_clk, priv->dfll_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to reparent to DFLL clock: %d\n", err);
+ goto disable_dfll;
+ }
+
+ return 0;
+
+disable_dfll:
+ clk_disable_unprepare(priv->dfll_clk);
+disable_cpufreq:
+ disable_cpufreq();
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra124_cpufreq_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend,
+ tegra124_cpufreq_resume)
+};
+
static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
+ .driver.pm = &tegra124_cpufreq_pm_ops,
.probe = tegra124_cpufreq_probe,
};
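
The new dev_pm_ops follow the common reparent-on-suspend pattern. A minimal sketch under assumed names (example_priv, safe_clk and fast_clk are illustrative, not from this driver):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>

struct example_priv {
	struct clk *cpu_clk;
	struct clk *safe_clk;	/* fixed-rate parent, safe at Vmin */
	struct clk *fast_clk;	/* normal high-speed parent */
};

static int example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	/* park the CPU clock on the safe parent before sleeping */
	return clk_set_parent(priv->cpu_clk, priv->safe_clk);
}

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	return clk_set_parent(priv->cpu_clk, priv->fast_clk);
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};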
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 88727b7c0d59..c0aeedd66f02 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -16,7 +16,7 @@ config CPU_IDLE
if CPU_IDLE
config CPU_IDLE_MULTIPLE_DRIVERS
- bool
+ bool
config CPU_IDLE_GOV_LADDER
bool "Ladder governor (for periodic timer tick)"
@@ -63,13 +63,13 @@ source "drivers/cpuidle/Kconfig.powerpc"
endmenu
config HALTPOLL_CPUIDLE
- tristate "Halt poll cpuidle driver"
- depends on X86 && KVM_GUEST
- default y
- help
- This option enables halt poll cpuidle driver, which allows to poll
- before halting in the guest (more efficient than polling in the
- host via halt_poll_ns for some scenarios).
+ tristate "Halt poll cpuidle driver"
+ depends on X86 && KVM_GUEST
+ default y
+ help
+ This option enables the halt poll cpuidle driver, which allows
+ polling before halting in the guest (more efficient than polling
+ in the host via halt_poll_ns for some scenarios).
endif
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index d8530475493c..a224d33dda7f 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -3,15 +3,15 @@
# ARM CPU Idle drivers
#
config ARM_CPUIDLE
- bool "Generic ARM/ARM64 CPU idle Driver"
- select DT_IDLE_STATES
+ bool "Generic ARM/ARM64 CPU idle Driver"
+ select DT_IDLE_STATES
select CPU_IDLE_MULTIPLE_DRIVERS
- help
- Select this to enable generic cpuidle driver for ARM.
- It provides a generic idle driver whose idle states are configured
- at run-time through DT nodes. The CPUidle suspend backend is
- initialized by calling the CPU operations init idle hook
- provided by architecture code.
+ help
+ Select this to enable generic cpuidle driver for ARM.
+ It provides a generic idle driver whose idle states are configured
+ at run-time through DT nodes. The CPUidle suspend backend is
+ initialized by calling the CPU operations init idle hook
+ provided by architecture code.
config ARM_PSCI_CPUIDLE
bool "PSCI CPU idle Driver"
@@ -65,21 +65,21 @@ config ARM_U8500_CPUIDLE
bool "Cpu Idle Driver for the ST-E u8500 processors"
depends on ARCH_U8500 && !ARM64
help
- Select this to enable cpuidle for ST-E u8500 processors
+ Select this to enable cpuidle for ST-E u8500 processors.
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
depends on ARCH_AT91 && !ARM64
help
- Select this to enable cpuidle for AT91 processors
+ Select this to enable cpuidle for AT91 processors.
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
depends on ARCH_EXYNOS && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
- Select this to enable cpuidle for Exynos processors
+ Select this to enable cpuidle for Exynos processors.
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 569dbac443bd..0005be5ea2b4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -572,7 +572,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
return -EINVAL;
for (i = 0; i < drv->state_count; i++)
- if (drv->states[i].disabled)
+ if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
per_cpu(cpuidle_devices, dev->cpu) = dev;
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 9f1ace9c53da..f7e83613ae94 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -53,7 +53,6 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
state->target_residency_ns = 0;
state->power_usage = -1;
state->enter = poll_idle;
- state->disabled = false;
state->flags = CPUIDLE_FLAG_POLLING;
}
EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
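
Both hunks above track the removal of the per-state 'disabled' field: drivers now express the same intent through a state flag that the core translates into a disable reason. A minimal sketch (the helper name is illustrative):

#include <linux/cpuidle.h>

static void example_mark_state_unusable(struct cpuidle_driver *drv, int i)
{
	/* the state stays registered but will never be entered */
	drv->states[i].flags |= CPUIDLE_FLAG_UNUSABLE;
}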
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43ed1b621718..91eb768d4221 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -289,6 +289,7 @@ config CRYPTO_DEV_TALITOS
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_HASH
+ select CRYPTO_LIB_DES
select HW_RANDOM
depends on FSL_SOC
help
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index dc1eb97d57f7..62b04e19067c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -179,14 +179,14 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
- if (!qp_ctx->c_in_pool) {
+ if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
goto err_free_req_list;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
- if (!qp_ctx->c_out_pool) {
+ if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
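
The check change matters because hisi_acc_create_sgl_pool() signals failure with an ERR_PTR()-encoded errno rather than NULL, so the old !pool test could never fire. A minimal caller fragment showing the convention:

#include <linux/err.h>

	pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, SEC_SGL_SGE_NR);
	if (IS_ERR(pool))		/* failure is never reported as NULL */
		return PTR_ERR(pool);	/* decode the errno, e.g. -ENOMEM */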
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index f840e61e5a27..425149e8bab0 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -921,7 +921,9 @@ int devfreq_suspend_device(struct devfreq *devfreq)
}
if (devfreq->suspend_freq) {
+ mutex_lock(&devfreq->lock);
ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
+ mutex_unlock(&devfreq->lock);
if (ret)
return ret;
}
@@ -949,7 +951,9 @@ int devfreq_resume_device(struct devfreq *devfreq)
return 0;
if (devfreq->resume_freq) {
+ mutex_lock(&devfreq->lock);
ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
+ mutex_unlock(&devfreq->lock);
if (ret)
return ret;
}
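
Both hunks restore the same invariant: devfreq_set_target() expects devfreq->lock to be held, as it is on every other frequency-update path. A minimal fragment of the pattern (df is an assumed struct devfreq pointer):

	mutex_lock(&df->lock);
	ret = devfreq_set_target(df, df->suspend_freq, 0);
	mutex_unlock(&df->lock);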
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 7af874b69ffb..6fa1eba9d477 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -15,19 +15,19 @@ menuconfig DMADEVICES
be empty in some cases.
config DMADEVICES_DEBUG
- bool "DMA Engine debugging"
- depends on DMADEVICES != n
- help
- This is an option for use by developers; most people should
- say N here. This enables DMA engine core and driver debugging.
+ bool "DMA Engine debugging"
+ depends on DMADEVICES != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables DMA engine core and driver debugging.
config DMADEVICES_VDEBUG
- bool "DMA Engine verbose debugging"
- depends on DMADEVICES_DEBUG != n
- help
- This is an option for use by developers; most people should
- say N here. This enables deeper (more verbose) debugging of
- the DMA engine core and drivers.
+ bool "DMA Engine verbose debugging"
+ depends on DMADEVICES_DEBUG != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables deeper (more verbose) debugging of
+ the DMA engine core and drivers.
if DMADEVICES
@@ -215,28 +215,28 @@ config FSL_EDMA
This module can be found on Freescale Vybrid and LS-1 SoCs.
config FSL_QDMA
- tristate "NXP Layerscape qDMA engine support"
- depends on ARM || ARM64
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select DMA_ENGINE_RAID
- select ASYNC_TX_ENABLE_CHANNEL_SWITCH
- help
- Support the NXP Layerscape qDMA engine with command queue and legacy mode.
- Channel virtualization is supported through enqueuing of DMA jobs to,
- or dequeuing DMA jobs from, different work queues.
- This module can be found on NXP Layerscape SoCs.
+ tristate "NXP Layerscape qDMA engine support"
+ depends on ARM || ARM64
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on NXP Layerscape SoCs.
The qdma driver only work on SoCs with a DPAA hardware block.
config FSL_RAID
- tristate "Freescale RAID engine Support"
- depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
- select DMA_ENGINE
- select DMA_ENGINE_RAID
- ---help---
- Enable support for Freescale RAID Engine. RAID Engine is
- available on some QorIQ SoCs (like P5020/P5040). It has
- the capability to offload memcpy, xor and pq computation
+ tristate "Freescale RAID engine Support"
+ depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ ---help---
+ Enable support for Freescale RAID Engine. RAID Engine is
+ available on some QorIQ SoCs (like P5020/P5040). It has
+ the capability to offload memcpy, xor and pq computation
for raid5/6.
config IMG_MDC_DMA
@@ -342,6 +342,26 @@ config MCF_EDMA
minimal intervention from a host processor.
This module can be found on Freescale ColdFire mcf5441x SoCs.
+config MILBEAUT_HDMAC
+ tristate "Milbeaut AHB DMA support"
+ depends on ARCH_MILBEAUT || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Say yes here to support the Socionext Milbeaut
+ HDMAC device.
+
+config MILBEAUT_XDMAC
+ tristate "Milbeaut AXI DMA support"
+ depends on ARCH_MILBEAUT || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Say yes here to support the Socionext Milbeaut
+ XDMAC device.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
@@ -635,6 +655,10 @@ config XILINX_DMA
destination address.
AXI DMA engine provides high-bandwidth one dimensional direct
memory access between memory and AXI4-Stream target peripherals.
+ AXI MCDMA engine provides high-bandwidth direct memory access
+ between memory and AXI4-Stream target peripherals. It provides
+ the scatter gather interface with multiple channels independent
+ configuration support.
config XILINX_ZYNQMP_DMA
tristate "Xilinx ZynqMP DMA Engine"
@@ -665,10 +689,14 @@ source "drivers/dma/dw-edma/Kconfig"
source "drivers/dma/hsu/Kconfig"
+source "drivers/dma/sf-pdma/Kconfig"
+
source "drivers/dma/sh/Kconfig"
source "drivers/dma/ti/Kconfig"
+source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f5ce8665e944..42d7e2fc64fa 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -45,6 +45,8 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
+obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
@@ -60,6 +62,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-y += mediatek/
obj-y += qcom/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b58ac720d9a1..f71c9f77d405 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1957,21 +1957,16 @@ static int atmel_xdmac_resume(struct device *dev)
static int at_xdmac_probe(struct platform_device *pdev)
{
- struct resource *res;
struct at_xdmac *atxdmac;
int irq, size, nr_channels, i, ret;
void __iomem *base;
u32 reg;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
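
This conversion (repeated in the dma-jz4780 hunk below) uses devm_platform_ioremap_resource(), which folds platform_get_resource(pdev, IORESOURCE_MEM, index) and devm_ioremap_resource() into one call with unified ERR_PTR() error reporting. A minimal probe sketch:

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* one call replaces get-resource plus ioremap */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}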
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index cafb1cc065bb..fa626acdc9b9 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -858,13 +858,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O memory\n");
- return -EINVAL;
- }
-
- jzdma->chn_base = devm_ioremap_resource(dev, res);
+ jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jzdma->chn_base))
return PTR_ERR(jzdma->chn_base);
@@ -987,6 +981,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
+ clk_disable_unprepare(jzdma->clk);
free_irq(jzdma->irq, jzdma);
for (i = 0; i < jzdma->soc_data->nb_channels; i++)
@@ -1019,11 +1014,18 @@ static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
+ .nb_channels = 8,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
+ { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
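
The new x1000 entry is picked up like the others: the probe path retrieves the per-SoC data attached to the matching of_device_id. A minimal sketch of the usual retrieval pattern (the probe body here is illustrative, not this driver's):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_dt_probe(struct platform_device *pdev)
{
	const struct jz4780_dma_soc_data *soc_data;

	/* returns the .data of the matched of_device_id entry */
	soc_data = of_device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	return 0;
}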
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c90c798e5ec3..0585d749d935 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev)
data->chip = chip;
- chip->clk = devm_clk_get(chip->dev, "hclk");
+ chip->clk = devm_clk_get_optional(chip->dev, "hclk");
if (IS_ERR(chip->clk))
return PTR_ERR(chip->clk);
err = clk_prepare_enable(chip->clk);
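
The switch to devm_clk_get_optional() works because it returns NULL, rather than an error, when the clock is simply not described, and the clk API treats a NULL clk as a no-op. A minimal fragment:

	clk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real errors still propagate */

	ret = clk_prepare_enable(clk);	/* trivially succeeds if clk is NULL */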
diff --git a/drivers/dma/fsl-dpaa2-qdma/Kconfig b/drivers/dma/fsl-dpaa2-qdma/Kconfig
new file mode 100644
index 000000000000..258ed6be934d
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Kconfig
@@ -0,0 +1,9 @@
+menuconfig FSL_DPAA2_QDMA
+ tristate "NXP DPAA2 QDMA"
+ depends on ARM64
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ NXP Data Path Acceleration Architecture 2 QDMA driver,
+ using the NXP MC bus driver.
diff --git a/drivers/dma/fsl-dpaa2-qdma/Makefile b/drivers/dma/fsl-dpaa2-qdma/Makefile
new file mode 100644
index 000000000000..c1d0226f2bd7
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the NXP DPAA2 qDMA controllers
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
new file mode 100644
index 000000000000..c70a7965f140
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/of_irq.h>
+#include <linux/iommu.h>
+#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "../virt-dma.h"
+#include "dpdmai.h"
+#include "dpaa2-qdma.h"
+
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
+
+ dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
+ sizeof(struct dpaa2_fd),
+ sizeof(struct dpaa2_fd), 0);
+ if (!dpaa2_chan->fd_pool)
+ goto err;
+
+ dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry),
+ sizeof(struct dpaa2_fl_entry), 0);
+ if (!dpaa2_chan->fl_pool)
+ goto err_fd;
+
+ dpaa2_chan->sdd_pool =
+ dma_pool_create("sdd_pool", dev,
+ sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d), 0);
+ if (!dpaa2_chan->sdd_pool)
+ goto err_fl;
+
+ return dpaa2_qdma->desc_allocated++;
+err_fl:
+ dma_pool_destroy(dpaa2_chan->fl_pool);
+err_fd:
+ dma_pool_destroy(dpaa2_chan->fd_pool);
+err:
+ return -ENOMEM;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ unsigned long flags;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+
+ dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
+ dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
+
+ dma_pool_destroy(dpaa2_chan->fd_pool);
+ dma_pool_destroy(dpaa2_chan->fl_pool);
+ dma_pool_destroy(dpaa2_chan->sdd_pool);
+ dpaa2_qdma->desc_allocated--;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
+ struct device *dev = &qdma_priv->dpdmai_dev->dev;
+ struct dpaa2_qdma_comp *comp_temp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->comp_free)) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
+ if (!comp_temp)
+ goto err;
+ comp_temp->fd_virt_addr =
+ dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
+ &comp_temp->fd_bus_addr);
+ if (!comp_temp->fd_virt_addr)
+ goto err_comp;
+
+ comp_temp->fl_virt_addr =
+ dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
+ &comp_temp->fl_bus_addr);
+ if (!comp_temp->fl_virt_addr)
+ goto err_fd_virt;
+
+ comp_temp->desc_virt_addr =
+ dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
+ &comp_temp->desc_bus_addr);
+ if (!comp_temp->desc_virt_addr)
+ goto err_fl_virt;
+
+ comp_temp->qchan = dpaa2_chan;
+ return comp_temp;
+ }
+
+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+ struct dpaa2_qdma_comp, list);
+ list_del(&comp_temp->list);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ comp_temp->qchan = dpaa2_chan;
+
+ return comp_temp;
+
+err_fl_virt:
+ dma_pool_free(dpaa2_chan->fl_pool,
+ comp_temp->fl_virt_addr,
+ comp_temp->fl_bus_addr);
+err_fd_virt:
+ dma_pool_free(dpaa2_chan->fd_pool,
+ comp_temp->fd_virt_addr,
+ comp_temp->fd_bus_addr);
+err_comp:
+ kfree(comp_temp);
+err:
+ dev_err(dev, "Failed to request descriptor\n");
+ return NULL;
+}
+
+static void
+dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_fd *fd;
+
+ fd = dpaa2_comp->fd_virt_addr;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+
+ /* fd populated */
+ dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
+
+ /*
+ * Bypass memory translation, frame list format, short length disable;
+ * BMT must be disabled when fsl-mc uses IOVA addresses.
+ */
+ if (smmu_disable)
+ dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
+ dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
+
+ dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
+}
+
+/* first frame list for descriptor buffer */
+static void
+dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ bool wrt_changed)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+
+ sdd = dpaa2_comp->desc_virt_addr;
+ memset(sdd, 0, 2 * (sizeof(*sdd)));
+
+ /* source descriptor CMD */
+ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
+ sdd++;
+
+ /* dest descriptor CMD */
+ if (wrt_changed)
+ sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
+ else
+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
+
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ /* first frame list to source descriptor */
+ dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
+ dpaa2_fl_set_len(f_list, 0x20);
+ dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
+
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+/* source and destination frame list */
+static void
+dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len, uint8_t fmt)
+{
+ /* source frame list to source buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ dpaa2_fl_set_addr(f_list, src);
+ dpaa2_fl_set_len(f_list, len);
+
+ /* single buffer frame or scatter gather frame */
+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+
+ f_list++;
+
+ /* destination frame list to destination buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ dpaa2_fl_set_addr(f_list, dst);
+ dpaa2_fl_set_len(f_list, len);
+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+ /* single buffer frame or scatter gather frame */
+ dpaa2_fl_set_final(f_list, QDMA_FL_F);
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+static struct dma_async_tx_descriptor *
+dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, ulong flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_fl_entry *f_list;
+ bool wrt_changed;
+
+ dpaa2_qdma = dpaa2_chan->qdma;
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+ if (!dpaa2_comp)
+ return NULL;
+
+ wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
+
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
+
+ f_list = dpaa2_comp->fl_virt_addr;
+
+ /* first frame list for descriptor buffer (long format) */
+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
+
+ f_list++;
+
+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct virt_dma_desc *vdesc;
+ struct dpaa2_fd *fd;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ spin_lock(&dpaa2_chan->vchan.lock);
+ if (vchan_issue_pending(&dpaa2_chan->vchan)) {
+ vdesc = vchan_next_desc(&dpaa2_chan->vchan);
+ if (!vdesc)
+ goto err_enqueue;
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+
+ fd = dpaa2_comp->fd_virt_addr;
+
+ list_del(&vdesc->node);
+ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
+
+ err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
+ if (err) {
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
+ }
+ }
+err_enqueue:
+ spin_unlock(&dpaa2_chan->vchan.lock);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+}
+
+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_qdma_priv *priv;
+ u8 prio_def = DPDMAI_PRIO_NUM;
+ int err = -EINVAL;
+ int i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpqdma_id = ls_dev->obj_desc.id;
+
+ /* Get the handle for the DPDMAI this interface is associated with */
+ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_open() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "Opened dpdmai object successfully\n");
+
+ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpdmai_attr);
+ if (err) {
+ dev_err(dev, "dpdmai_get_attributes() failed\n");
+ goto exit;
+ }
+
+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ dev_err(dev, "DPDMAI major version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ goto exit;
+ }
+
+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ dev_err(dev, "DPDMAI minor version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ goto exit;
+ }
+
+ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
+ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
+ if (!ppriv) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ priv->ppriv = ppriv;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_rx_queue() failed\n");
+ goto exit;
+ }
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+
+ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->tx_fqid[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_tx_queue() failed\n");
+ goto exit;
+ }
+ ppriv->req_fqid = priv->tx_fqid[i];
+ ppriv->prio = i;
+ ppriv->priv = priv;
+ ppriv++;
+ }
+
+ return 0;
+exit:
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ return err;
+}
+
+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
+ struct dpaa2_qdma_priv_per_prio, nctx);
+ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
+ struct dpaa2_qdma_priv *priv = ppriv->priv;
+ u32 n_chans = priv->dpaa2_qdma->n_chans;
+ struct dpaa2_qdma_chan *qchan;
+ const struct dpaa2_fd *fd_eq;
+ const struct dpaa2_fd *fd;
+ struct dpaa2_dq *dq;
+ int is_last = 0;
+ int found;
+ u8 status;
+ int err;
+ int i;
+
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err);
+
+ while (!is_last) {
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ } while (!is_last && !dq);
+ if (!dq) {
+ dev_err(priv->dev, "FQID returned no valid frames!\n");
+ continue;
+ }
+
+ /* obtain FD and process the error */
+ fd = dpaa2_dq_fd(dq);
+
+ status = dpaa2_fd_get_ctrl(fd) & 0xff;
+ if (status)
+ dev_err(priv->dev, "FD error occurred\n");
+ found = 0;
+ for (i = 0; i < n_chans; i++) {
+ qchan = &priv->dpaa2_qdma->chans[i];
+ spin_lock(&qchan->queue_lock);
+ if (list_empty(&qchan->comp_used)) {
+ spin_unlock(&qchan->queue_lock);
+ continue;
+ }
+ list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
+ &qchan->comp_used, list) {
+ fd_eq = dpaa2_comp->fd_virt_addr;
+
+ if (le64_to_cpu(fd_eq->simple.addr) ==
+ le64_to_cpu(fd->simple.addr)) {
+ spin_lock(&qchan->vchan.lock);
+ vchan_cookie_complete(&dpaa2_comp->vdesc);
+ spin_unlock(&qchan->vchan.lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&qchan->queue_lock);
+ if (found)
+ break;
+ }
+ }
+
+ dpaa2_io_service_rearm(NULL, ctx);
+}
+
+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = priv->dev;
+ int err = -EINVAL;
+ int i, num;
+
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ ppriv->nctx.is_cdan = 0;
+ ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
+ ppriv->nctx.id = ppriv->rsp_fqid;
+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
+ if (err) {
+ dev_err(dev, "Notification register failed\n");
+ goto err_service;
+ }
+
+ ppriv->store =
+ dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+ if (!ppriv->store) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ goto err_store;
+ }
+
+ ppriv++;
+ }
+ return 0;
+
+err_store:
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+err_service:
+ ppriv--;
+ while (ppriv >= priv->ppriv) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv--;
+ }
+ return err;
+}
+
+static void dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv++;
+ }
+}
+
+static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ struct device *dev = priv->dev;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ ppriv++;
+ }
+}
+
+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
+{
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev;
+ int i, num;
+ int err;
+
+ ls_dev = to_fsl_mc_device(dev);
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
+ DPDMAI_QUEUE_OPT_DEST;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ rx_queue_cfg.dest_cfg.priority = ppriv->prio;
+ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ rx_queue_cfg.dest_cfg.priority,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpdmai_set_rx_queue() failed\n");
+ return err;
+ }
+
+ ppriv++;
+ }
+
+ return 0;
+}
+
+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev;
+ int err = 0;
+ int i;
+
+ ls_dev = to_fsl_mc_device(dev);
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv->nctx.qman64 = 0;
+ ppriv->nctx.dpio_id = 0;
+ ppriv++;
+ }
+
+ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpdmai_reset() failed\n");
+
+ return err;
+}
+
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head)
+{
+ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
+ unsigned long flags;
+
+ list_for_each_entry_safe(comp_tmp, _comp_tmp,
+ head, list) {
+ spin_lock_irqsave(&qchan->queue_lock, flags);
+ list_del(&comp_tmp->list);
+ spin_unlock_irqrestore(&qchan->queue_lock, flags);
+ dma_pool_free(qchan->fd_pool,
+ comp_tmp->fd_virt_addr,
+ comp_tmp->fd_bus_addr);
+ dma_pool_free(qchan->fl_pool,
+ comp_tmp->fl_virt_addr,
+ comp_tmp->fl_bus_addr);
+ dma_pool_free(qchan->sdd_pool,
+ comp_tmp->desc_virt_addr,
+ comp_tmp->desc_bus_addr);
+ kfree(comp_tmp);
+ }
+}
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_chan *qchan;
+ int num, i;
+
+ num = dpaa2_qdma->n_chans;
+ for (i = 0; i < num; i++) {
+ qchan = &dpaa2_qdma->chans[i];
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
+ dma_pool_destroy(qchan->fd_pool);
+ dma_pool_destroy(qchan->fl_pool);
+ dma_pool_destroy(qchan->sdd_pool);
+ }
+}
+
+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_qdma_chan *qchan;
+ unsigned long flags;
+
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+ qchan = dpaa2_comp->qchan;
+ spin_lock_irqsave(&qchan->queue_lock, flags);
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+ spin_unlock_irqrestore(&qchan->queue_lock, flags);
+}
+
+static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
+ struct dpaa2_qdma_chan *dpaa2_chan;
+ int num = priv->num_pairs;
+ int i;
+
+ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
+ for (i = 0; i < dpaa2_qdma->n_chans; i++) {
+ dpaa2_chan = &dpaa2_qdma->chans[i];
+ dpaa2_chan->qdma = dpaa2_qdma;
+ dpaa2_chan->fqid = priv->tx_fqid[i % num];
+ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
+ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
+ spin_lock_init(&dpaa2_chan->queue_lock);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
+ }
+ return 0;
+}
+
+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
+{
+ struct device *dev = &dpdmai_dev->dev;
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv *priv;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(dev, priv);
+ priv->dpdmai_dev = dpdmai_dev;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (priv->iommu_domain)
+ smmu_disable = false;
+
+ /* obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ /* DPDMAI initialization */
+ err = dpaa2_qdma_setup(dpdmai_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
+ goto err_dpdmai_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_qdma_dpio_setup(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPDMAI binding to DPIO */
+ err = dpaa2_dpdmai_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPDMAI enable */
+ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_enable() faile\n");
+ goto err_enable;
+ }
+
+ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
+ if (!dpaa2_qdma) {
+ err = -ENOMEM;
+ goto err_eng;
+ }
+
+ priv->dpaa2_qdma = dpaa2_qdma;
+ dpaa2_qdma->priv = priv;
+
+ dpaa2_qdma->desc_allocated = 0;
+ dpaa2_qdma->n_chans = NUM_CH;
+
+ dpaa2_dpdmai_init_channels(dpaa2_qdma);
+
+ if (soc_device_match(soc_fixup_tuning))
+ dpaa2_qdma->qdma_wrtype_fixup = true;
+ else
+ dpaa2_qdma->qdma_wrtype_fixup = false;
+
+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+
+ dpaa2_qdma->dma_dev.dev = dev;
+ dpaa2_qdma->dma_dev.device_alloc_chan_resources =
+ dpaa2_qdma_alloc_chan_resources;
+ dpaa2_qdma->dma_dev.device_free_chan_resources =
+ dpaa2_qdma_free_chan_resources;
+ dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+ if (err) {
+ dev_err(dev, "Can't register NXP QDMA engine.\n");
+ goto err_dpaa2_qdma;
+ }
+
+ return 0;
+
+err_dpaa2_qdma:
+ kfree(dpaa2_qdma);
+err_eng:
+ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_enable:
+ dpaa2_dpdmai_dpio_unbind(priv);
+err_bind:
+ dpaa2_dpdmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+err_dpio_setup:
+ kfree(priv->ppriv);
+ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_dpdmai_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_mcportal:
+ kfree(priv);
+ dev_set_drvdata(dev, NULL);
+ return err;
+}
+
+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv *priv;
+ struct device *dev;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+ dpaa2_qdma = priv->dpaa2_qdma;
+
+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ dpaa2_dpdmai_dpio_unbind(priv);
+ dpaa2_dpdmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ fsl_mc_portal_free(priv->mc_io);
+ dev_set_drvdata(dev, NULL);
+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
+
+ dma_async_device_unregister(&dpaa2_qdma->dma_dev);
+ kfree(priv);
+ kfree(dpaa2_qdma);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpdmai",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_qdma_driver = {
+ .driver = {
+ .name = "dpaa2-qdma",
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_qdma_probe,
+ .remove = dpaa2_qdma_remove,
+ .match_id_table = dpaa2_qdma_id_table
+};
+
+static int __init dpaa2_qdma_driver_init(void)
+{
+ return fsl_mc_driver_register(&dpaa2_qdma_driver);
+}
+late_initcall(dpaa2_qdma_driver_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+ fsl_mc_driver_unregister(&dpaa2_qdma_driver);
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-dpaa2-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
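
Because the new engine advertises DMA_MEMCPY, clients drive it through the generic dmaengine API rather than anything driver-specific. A minimal client-side sketch (not part of this driver; dst, src and len are assumed to be already-mapped DMA addresses, and a real client waits for completion before releasing the channel):

#include <linux/dmaengine.h>

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for tx->callback or poll the cookie here ... */
out:
	dma_release_channel(chan);
	return ret;
}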
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
new file mode 100644
index 000000000000..7d571849c569
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __DPAA2_QDMA_H
+#define __DPAA2_QDMA_H
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
+
+struct dpaa2_qdma_sd_d {
+ u32 rsv:32;
+ union {
+ struct {
+ u32 ssd:12; /* source stride distance */
+ u32 sss:12; /* source stride size */
+ u32 rsv1:8;
+ } sdf;
+ struct {
+ u32 dsd:12; /* Destination stride distance */
+ u32 dss:12; /* Destination stride size */
+ u32 rsv2:8;
+ } ddf;
+ } df;
+ u32 rbpcmd; /* Route-by-port command */
+ u32 cmd;
+} __attribute__((__packed__));
+
+/* Source descriptor command read transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
+/* Destination descriptor command write transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
+#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
+
+#define QMAN_FD_FMT_ENABLE BIT(0) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE BIT(15) /* bypass memory translation */
+#define QMAN_FD_BMT_DISABLE (0) /* bypass memory translation */
+#define QMAN_FD_SL_DISABLE (0) /* short length disabled */
+#define QMAN_FD_SL_ENABLE BIT(14) /* short length enabled */
+
+#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */
+#define QDMA_FINAL_BIT_ENABLE BIT(31) /* final bit enable */
+
+#define QDMA_FD_SHORT_FORMAT BIT(11) /* short format */
+#define QDMA_FD_LONG_FORMAT (0) /* long format */
+#define QDMA_SER_DISABLE (8) /* no notification */
+#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
+#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
+#define QDMA_FD_SPF_ENABLE BIT(30) /* source prefetch enable */
+
+#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
+#define QMAN_FD_VA_DISABLE (0)/* Address used is a real address */
+/* Flow Context: 49-bit physical address */
+#define QMAN_FD_CBMT_ENABLE BIT(15)
+#define QMAN_FD_CBMT_DISABLE (0) /* Flow Context: 64-bit virtual address */
+#define QMAN_FD_SC_DISABLE (0) /* stashing control */
+
+#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
+#define QDMA_FL_FMT_SGE (0x2) /* Scatter gather frame */
+#define QDMA_FL_BMT_ENABLE BIT(15) /* enable bypass memory translation */
+#define QDMA_FL_BMT_DISABLE (0x0) /* enable bypass memory translation */
+#define QDMA_FL_SL_LONG (0x0)/* long length */
+#define QDMA_FL_SL_SHORT (0x1) /* short length */
+#define QDMA_FL_F (0x1)/* last frame list bit */
+
+/* Description of frame list table structure */
+struct dpaa2_qdma_chan {
+ struct dpaa2_qdma_engine *qdma;
+ struct virt_dma_chan vchan;
+ struct virt_dma_desc vdesc;
+ enum dma_status status;
+ u32 fqid;
+
+ /* spinlock used by dpaa2 qdma driver */
+ spinlock_t queue_lock;
+ struct dma_pool *fd_pool;
+ struct dma_pool *fl_pool;
+ struct dma_pool *sdd_pool;
+
+ struct list_head comp_used;
+ struct list_head comp_free;
+
+};
+
+struct dpaa2_qdma_comp {
+ dma_addr_t fd_bus_addr;
+ dma_addr_t fl_bus_addr;
+ dma_addr_t desc_bus_addr;
+ struct dpaa2_fd *fd_virt_addr;
+ struct dpaa2_fl_entry *fl_virt_addr;
+ struct dpaa2_qdma_sd_d *desc_virt_addr;
+ struct dpaa2_qdma_chan *qchan;
+ struct virt_dma_desc vdesc;
+ struct list_head list;
+};
+
+struct dpaa2_qdma_engine {
+ struct dma_device dma_dev;
+ u32 n_chans;
+ struct dpaa2_qdma_chan chans[NUM_CH];
+ int qdma_wrtype_fixup;
+ int desc_allocated;
+
+ struct dpaa2_qdma_priv *priv;
+};
+
+/*
+ * dpaa2_qdma_priv - driver private data
+ */
+struct dpaa2_qdma_priv {
+ int dpqdma_id;
+
+ struct iommu_domain *iommu_domain;
+ struct dpdmai_attr dpdmai_attr;
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct fsl_mc_device *dpdmai_dev;
+ u8 num_pairs;
+
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+
+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
+ u32 tx_fqid[DPDMAI_PRIO_NUM];
+};
+
+struct dpaa2_qdma_priv_per_prio {
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+
+ struct dpaa2_qdma_priv *priv;
+};
+
+static struct soc_device_attribute soc_fixup_tuning[] = {
+ { .family = "QorIQ LX2160A"},
+ { },
+};
+
+/* FD pool size: one FD + three frame list entries + two source/destination descriptors */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
+ sizeof(struct dpaa2_fl_entry) * 3 + \
+ sizeof(struct dpaa2_qdma_sd_d) * 2)
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head);
+#endif /* __DPAA2_QDMA_H */
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
new file mode 100644
index 000000000000..f8d22115154a
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+#include "dpdmai.h"
+
+struct dpdmai_rsp_get_attributes {
+ __le32 id;
+ u8 num_of_priorities;
+ u8 pad0[3];
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpdmai_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ __le64 pad;
+ __le32 fqid;
+};
+
+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPDMAI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
+ MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
+} while (0)
+
+static inline u64 mc_enc(int lsoffset, int width, u64 val)
+{
+ return (val & MAKE_UMASK64(width)) << lsoffset;
+}
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ int dpdmai_id, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ __le64 *cmd_dpdmai_id;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags, 0);
+
+ cmd_dpdmai_id = cmd.params;
+ *cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_open);
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_close);
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @token: Returned token; use in subsequent API calls
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent calls to
+ * this specific object. For objects that are created using the
+ * DPL file, call dpdmai_open() function to get an authentication
+ * token first.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ const struct dpdmai_cfg *cfg, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags, 0);
+ DPDMAI_CMD_CREATE(cmd, cfg);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_enable);
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_disable);
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_reset);
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->major);
+ attr->version.minor = le16_to_cpu(rsp_params->minor);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = priority;
+ cmd_params->dest_type = cfg->dest_cfg.dest_type;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->queue = priority;
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = cmd_params->dest_type;
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @fqid: Returned Tx queue
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, u8 priority, u32 *fqid)
+{
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->queue = priority;
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ *fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_tx_queue);
+
+MODULE_LICENSE("GPL v2");
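
Every helper above follows the same prepare/send/decode pattern against the MC portal. A minimal sketch chaining them, assuming an already-allocated fsl_mc_io portal and a known object id (the function name is illustrative):

#include <linux/fsl/mc.h>
#include "dpdmai.h"

static int example_query_dpdmai(struct fsl_mc_io *mc_io, int obj_id)
{
	struct dpdmai_attr attr;
	u16 token;
	int err;

	err = dpdmai_open(mc_io, 0, obj_id, &token);
	if (err)
		return err;

	/* on success, attr.version and attr.num_of_priorities are valid */
	err = dpdmai_get_attributes(mc_io, 0, token, &attr);

	dpdmai_close(mc_io, 0, token);
	return err;
}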
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
new file mode 100644
index 000000000000..6d785093da8e
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 2
+#define DPDMAI_VER_MINOR 2
+
+#define DPDMAI_CMD_BASE_VERSION 0
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
+ DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMDID_FORMAT(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
+
+#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010)
+#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011)
+#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012)
+#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
+#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
+#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
+#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
+
+#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
+#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
+
+#define MAKE_UMASK64(_width) \
+ ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : (u64)-1))
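+/* e.g. MAKE_UMASK64(16) == 0xffff and MAKE_UMASK64(64) == ~0ULL */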
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x1
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x2
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ * configured with values 1-8; the entry following last valid entry
+ * should be configured with 0
+ */
+struct dpdmai_cfg {
+ u8 priorities[DPDMAI_PRIO_NUM];
+};
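+
+/*
+ * For example (a sketch), a DPDMAI object with a single valid priority
+ * could be described by:
+ *
+ *	struct dpdmai_cfg cfg = { .priorities = { 1, 0 } };
+ *
+ * where the trailing 0 terminates the list of valid priorities.
+ */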
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @version: DPDMAI version
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ /**
+ * struct version - DPDMAI version
+ * @major: DPDMAI major version
+ * @minor: DPDMAI minor version
+ */
+ struct {
+ u16 major;
+ u16 minor;
+ } version;
+ u8 num_of_priorities;
+};
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ struct dpdmai_dest_cfg dest_cfg;
+ u32 options;
+ u64 user_ctx;
+};
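+
+/*
+ * Configuration example (a sketch; 'dpio_id' and 'priv' are caller-supplied
+ * placeholders): steer dequeue notifications to a DPIO and attach a user
+ * context:
+ *
+ *	struct dpdmai_rx_queue_cfg cfg = {
+ *		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
+ *		.user_ctx = (u64)priv,
+ *		.dest_cfg = {
+ *			.dest_type = DPDMAI_DEST_DPIO,
+ *			.dest_id = dpio_id,
+ *			.priority = 0,
+ *		},
+ *	};
+ */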
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ struct dpdmai_dest_cfg dest_cfg;
+ u64 user_ctx;
+ u32 fqid;
+};
+
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ int dpdmai_id, u16 *token);
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ const struct dpdmai_cfg *cfg, u16 *token);
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpdmai_attr *attr);
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, struct dpdmai_rx_queue_attr *attr);
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, u8 priority, u32 *fqid);
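+
+/*
+ * Typical call sequence (a minimal sketch; error handling omitted, and
+ * 'mc_io', 'dpdmai_id' and the 'rx_cfg' queue configuration are assumed
+ * to be provided by the caller):
+ *
+ *	u16 token;
+ *	struct dpdmai_attr attr;
+ *
+ *	dpdmai_open(mc_io, 0, dpdmai_id, &token);
+ *	dpdmai_get_attributes(mc_io, 0, token, &attr);
+ *	dpdmai_set_rx_queue(mc_io, 0, token, 0, &rx_cfg);
+ *	dpdmai_enable(mc_io, 0, token);
+ *	...
+ *	dpdmai_disable(mc_io, 0, token);
+ *	dpdmai_close(mc_io, 0, token);
+ */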
+
+#endif /* __FSL_DPDMAI_H */
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 06664fbd2d91..89792083d62c 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
return ret;
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ if (fsl_qdma->irq_base < 0)
+ return fsl_qdma->irq_base;
+
fsl_qdma->feature = of_property_read_bool(np, "big-endian");
INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index a3f942a6a946..db0e274126fb 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -173,7 +173,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
&iop_chan->chain, chain_node) {
zero_sum_result |=
iop_desc_get_zero_result(grp_iter);
- pr_debug("\titer%d result: %d\n",
+ pr_debug("\titer%d result: %d\n",
grp_iter->idx, zero_sum_result);
slot_cnt -= slots_per_op;
if (slot_cnt == 0)
@@ -1359,9 +1359,11 @@ static int iop_adma_probe(struct platform_device *pdev)
iop_adma_device_clear_err_status(iop_chan);
for (i = 0; i < 3; i++) {
- irq_handler_t handler[] = { iop_adma_eot_handler,
- iop_adma_eoc_handler,
- iop_adma_err_handler };
+ static const irq_handler_t handler[] = {
+ iop_adma_eot_handler,
+ iop_adma_eoc_handler,
+ iop_adma_err_handler
+ };
int irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = -ENXIO;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 4b36c8810517..adecea51814f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -835,13 +835,8 @@ static int k3_dma_probe(struct platform_device *op)
const struct k3dma_soc_data *soc_data;
struct k3_dma_dev *d;
const struct of_device_id *of_id;
- struct resource *iores;
int i, ret, irq = 0;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
@@ -850,7 +845,7 @@ static int k3_dma_probe(struct platform_device *op)
if (!soc_data)
return -EINVAL;
- d->base = devm_ioremap_resource(&op->dev, iores);
+ d->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 723b11c190b3..6bf838e63be1 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -819,15 +819,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cqdma->pc[i]->queue);
spin_lock_init(&cqdma->pc[i]->lock);
refcount_set(&cqdma->pc[i]->refcnt, 0);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(&pdev->dev, "No mem resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
-
- cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res);
+ cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(cqdma->pc[i]->base))
return PTR_ERR(cqdma->pc[i]->base);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 1a2028e1c29e..4c58da742143 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"request_irq failed with err %d\n", err);
- goto err_unregister;
+ goto err_free;
}
platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
+err_free:
+ of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index f40051d6aecb..c20e6bd4e298 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -475,7 +475,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mtk_uart_apdmadev *mtkd;
int bit_mask = 32, rc;
- struct resource *res;
struct mtk_chan *c;
unsigned int i;
@@ -532,13 +531,7 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
goto err_no_dma;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- rc = -ENODEV;
- goto err_no_dma;
- }
-
- c->base = devm_ioremap_resource(&pdev->dev, res);
+ c->base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(c->base)) {
rc = PTR_ERR(c->base);
goto err_no_dma;
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
new file mode 100644
index 000000000000..8853d442430b
--- /dev/null
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+#define MLB_HDMAC_DMACR 0x0 /* global */
+#define MLB_HDMAC_DE BIT(31)
+#define MLB_HDMAC_DS BIT(30)
+#define MLB_HDMAC_PR BIT(28)
+#define MLB_HDMAC_DH GENMASK(27, 24)
+
+#define MLB_HDMAC_CH_STRIDE 0x10
+
+#define MLB_HDMAC_DMACA 0x0 /* channel */
+#define MLB_HDMAC_EB BIT(31)
+#define MLB_HDMAC_PB BIT(30)
+#define MLB_HDMAC_ST BIT(29)
+#define MLB_HDMAC_IS GENMASK(28, 24)
+#define MLB_HDMAC_BT GENMASK(23, 20)
+#define MLB_HDMAC_BC GENMASK(19, 16)
+#define MLB_HDMAC_TC GENMASK(15, 0)
+#define MLB_HDMAC_DMACB 0x4
+#define MLB_HDMAC_TT GENMASK(31, 30)
+#define MLB_HDMAC_MS GENMASK(29, 28)
+#define MLB_HDMAC_TW GENMASK(27, 26)
+#define MLB_HDMAC_FS BIT(25)
+#define MLB_HDMAC_FD BIT(24)
+#define MLB_HDMAC_RC BIT(23)
+#define MLB_HDMAC_RS BIT(22)
+#define MLB_HDMAC_RD BIT(21)
+#define MLB_HDMAC_EI BIT(20)
+#define MLB_HDMAC_CI BIT(19)
+#define HDMAC_PAUSE 0x7
+#define MLB_HDMAC_SS GENMASK(18, 16)
+#define MLB_HDMAC_SP GENMASK(15, 12)
+#define MLB_HDMAC_DP GENMASK(11, 8)
+#define MLB_HDMAC_DMACSA 0x8
+#define MLB_HDMAC_DMACDA 0xc
+
+#define MLB_HDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+struct milbeaut_hdmac_desc {
+ struct virt_dma_desc vd;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int sg_cur;
+ enum dma_transfer_direction dir;
+};
+
+struct milbeaut_hdmac_chan {
+ struct virt_dma_chan vc;
+ struct milbeaut_hdmac_device *mdev;
+ struct milbeaut_hdmac_desc *md;
+ void __iomem *reg_ch_base;
+ unsigned int slave_id;
+ struct dma_slave_config cfg;
+};
+
+struct milbeaut_hdmac_device {
+ struct dma_device ddev;
+ struct clk *clk;
+ void __iomem *reg_base;
+ struct milbeaut_hdmac_chan channels[];
+};
+
+static struct milbeaut_hdmac_chan *
+to_milbeaut_hdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct milbeaut_hdmac_chan, vc);
+}
+
+static struct milbeaut_hdmac_desc *
+to_milbeaut_hdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct milbeaut_hdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_hdmac_desc *
+milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&mc->vc);
+ if (!vd) {
+ mc->md = NULL;
+ return NULL;
+ }
+
+ list_del(&vd->node);
+
+ mc->md = to_milbeaut_hdmac_desc(vd);
+
+ return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc,
+ struct milbeaut_hdmac_desc *md)
+{
+ struct scatterlist *sg;
+ u32 cb, ca, src_addr, dest_addr, len;
+ u32 width, burst;
+
+ sg = &md->sgl[md->sg_cur];
+ len = sg_dma_len(sg);
+
+ cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
+ if (md->dir == DMA_MEM_TO_DEV) {
+ cb |= MLB_HDMAC_FD;
+ width = mc->cfg.dst_addr_width;
+ burst = mc->cfg.dst_maxburst;
+ src_addr = sg_dma_address(sg);
+ dest_addr = mc->cfg.dst_addr;
+ } else {
+ cb |= MLB_HDMAC_FS;
+ width = mc->cfg.src_addr_width;
+ burst = mc->cfg.src_maxburst;
+ src_addr = mc->cfg.src_addr;
+ dest_addr = sg_dma_address(sg);
+ }
+ cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
+ cb |= FIELD_PREP(MLB_HDMAC_MS, 2);
+
+ writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR);
+ writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA);
+ writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA);
+ writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+ ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
+ if (burst == 16)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
+ else if (burst == 8)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
+ else if (burst == 4)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
+ burst *= width;
+ ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
+ writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ ca |= MLB_HDMAC_EB;
+ writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+}
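+
+/*
+ * Transfer-count arithmetic in milbeaut_chan_start() above, worked example:
+ * with a 4-byte bus width and a burst of 8 beats, one burst moves 32 bytes,
+ * so a 4096-byte segment programs MLB_HDMAC_TC = 4096 / 32 - 1 = 127.
+ */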
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc)
+{
+ struct milbeaut_hdmac_desc *md;
+
+ md = milbeaut_hdmac_next_desc(mc);
+ if (md)
+ milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id)
+{
+ struct milbeaut_hdmac_chan *mc = dev_id;
+ struct milbeaut_hdmac_desc *md;
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+
+ /* Ack and disable IRQs */
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB);
+ val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE));
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+ val &= ~MLB_HDMAC_EI;
+ val &= ~MLB_HDMAC_CI;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+ md = mc->md;
+ if (!md)
+ goto out;
+
+ md->sg_cur++;
+
+ if (md->sg_cur >= md->sg_len) {
+ vchan_cookie_complete(&md->vd);
+ md = milbeaut_hdmac_next_desc(mc);
+ if (!md)
+ goto out;
+ }
+
+ milbeaut_chan_start(mc, md);
+
+out:
+ spin_unlock(&mc->vc.lock);
+ return IRQ_HANDLED;
+}
+
+static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static int
+milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+
+ spin_lock(&mc->vc.lock);
+ mc->cfg = *cfg;
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_chan_pause(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val |= MLB_HDMAC_PB;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_chan_resume(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val &= ~MLB_HDMAC_PB;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_desc *md;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ md = kzalloc(sizeof(*md), GFP_NOWAIT);
+ if (!md)
+ return NULL;
+
+ md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!md->sgl) {
+ kfree(md);
+ return NULL;
+ }
+
+ for (i = 0; i < sg_len; i++)
+ md->sgl[i] = sgl[i];
+
+ md->sg_len = sg_len;
+ md->dir = direction;
+
+ return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_hdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ unsigned long flags;
+ u32 val;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val &= ~MLB_HDMAC_EB; /* disable the channel */
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+
+ if (mc->md) {
+ vchan_terminate_vdesc(&mc->md->vd);
+ mc->md = NULL;
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return 0;
+}
+
+static void milbeaut_hdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct virt_dma_chan *vc;
+ struct virt_dma_desc *vd;
+ struct milbeaut_hdmac_chan *mc;
+ struct milbeaut_hdmac_desc *md = NULL;
+ enum dma_status stat;
+ unsigned long flags;
+ int i;
+
+ stat = dma_cookie_status(chan, cookie, txstate);
+ /* Return immediately if we do not need to compute the residue. */
+ if (stat == DMA_COMPLETE || !txstate)
+ return stat;
+
+ vc = to_virt_chan(chan);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ mc = to_milbeaut_hdmac_chan(vc);
+
+ /* residue from the in-flight chunk */
+ if (mc->md && mc->md->vd.tx.cookie == cookie) {
+ struct scatterlist *sg;
+ u32 done;
+
+ md = mc->md;
+ sg = &md->sgl[md->sg_cur];
+
+ if (md->dir == DMA_DEV_TO_MEM)
+ done = readl_relaxed(mc->reg_ch_base
+ + MLB_HDMAC_DMACDA);
+ else
+ done = readl_relaxed(mc->reg_ch_base
+ + MLB_HDMAC_DMACSA);
+ done -= sg_dma_address(sg);
+
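+ /*
+ * Negative on purpose: the queued-chunks loop below adds the
+ * current entry back at its full length, compensating this value.
+ */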
+ txstate->residue = -done;
+ }
+
+ if (!md) {
+ vd = vchan_find_desc(vc, cookie);
+ if (vd)
+ md = to_milbeaut_hdmac_desc(vd);
+ }
+
+ if (md) {
+ /* residue from the queued chunks */
+ for (i = md->sg_cur; i < md->sg_len; i++)
+ txstate->residue += sg_dma_len(&md->sgl[i]);
+ }
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return stat;
+}
+
+static void milbeaut_hdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !mc->md)
+ milbeaut_hdmac_start(mc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd)
+{
+ struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd);
+
+ kfree(md->sgl);
+ kfree(md);
+}
+
+static struct dma_chan *
+milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma)
+{
+ struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data;
+ struct milbeaut_hdmac_chan *mc;
+ struct virt_dma_chan *vc;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&mdev->ddev);
+ if (!chan)
+ return NULL;
+
+ vc = to_virt_chan(chan);
+ mc = to_milbeaut_hdmac_chan(vc);
+ mc->slave_id = dma_spec->args[0];
+
+ return chan;
+}
+
+static int milbeaut_hdmac_chan_init(struct platform_device *pdev,
+ struct milbeaut_hdmac_device *mdev,
+ int chan_id)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id];
+ char *irq_name;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, chan_id);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d",
+ chan_id);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt,
+ IRQF_SHARED, irq_name, mc);
+ if (ret)
+ return ret;
+
+ mc->mdev = mdev;
+ mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1);
+ mc->vc.desc_free = milbeaut_hdmac_desc_free;
+ vchan_init(&mc->vc, &mdev->ddev);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_hdmac_device *mdev;
+ struct dma_device *ddev;
+ int nr_chans, ret, i;
+
+ nr_chans = platform_irq_count(pdev);
+ if (nr_chans < 0)
+ return nr_chans;
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->reg_base))
+ return PTR_ERR(mdev->reg_base);
+
+ mdev->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mdev->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(mdev->clk);
+ }
+
+ ret = clk_prepare_enable(mdev->clk);
+ if (ret)
+ return ret;
+
+ ddev = &mdev->ddev;
+ ddev->dev = dev;
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS;
+ ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources;
+ ddev->device_config = milbeaut_hdmac_chan_config;
+ ddev->device_pause = milbeaut_hdmac_chan_pause;
+ ddev->device_resume = milbeaut_hdmac_chan_resume;
+ ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg;
+ ddev->device_terminate_all = milbeaut_hdmac_terminate_all;
+ ddev->device_synchronize = milbeaut_hdmac_synchronize;
+ ddev->device_tx_status = milbeaut_hdmac_tx_status;
+ ddev->device_issue_pending = milbeaut_hdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++) {
+ ret = milbeaut_hdmac_chan_init(pdev, mdev, i);
+ if (ret)
+ goto disable_clk;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node,
+ milbeaut_hdmac_xlate, mdev);
+ if (ret)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, mdev);
+
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(ddev);
+disable_clk:
+ clk_disable_unprepare(mdev->clk);
+
+ return ret;
+}
+
+static int milbeaut_hdmac_remove(struct platform_device *pdev)
+{
+ struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+ * ->device_free_chan_resources() hook. However, each channel might
+ * still be holding one descriptor that was in flight at that moment.
+ * Terminate it to make sure this hardware is no longer running. Then,
+ * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ milbeaut_hdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&mdev->ddev);
+ clk_disable_unprepare(mdev->clk);
+
+ return 0;
+}
+
+static const struct of_device_id milbeaut_hdmac_match[] = {
+ { .compatible = "socionext,milbeaut-m10v-hdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
+
+static struct platform_driver milbeaut_hdmac_driver = {
+ .probe = milbeaut_hdmac_probe,
+ .remove = milbeaut_hdmac_remove,
+ .driver = {
+ .name = "milbeaut-m10v-hdmac",
+ .of_match_table = milbeaut_hdmac_match,
+ },
+};
+module_platform_driver(milbeaut_hdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
new file mode 100644
index 000000000000..ab3d2f395378
--- /dev/null
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+/* global register */
+#define M10V_XDACS 0x00
+
+/* channel local register */
+#define M10V_XDTBC 0x10
+#define M10V_XDSSA 0x14
+#define M10V_XDDSA 0x18
+#define M10V_XDSAC 0x1C
+#define M10V_XDDAC 0x20
+#define M10V_XDDCC 0x24
+#define M10V_XDDES 0x28
+#define M10V_XDDPC 0x2C
+#define M10V_XDDSD 0x30
+
+#define M10V_XDACS_XE BIT(28)
+
+#define M10V_DEFBS 0x3
+#define M10V_DEFBL 0xf
+
+#define M10V_XDSAC_SBS GENMASK(17, 16)
+#define M10V_XDSAC_SBL GENMASK(11, 8)
+
+#define M10V_XDDAC_DBS GENMASK(17, 16)
+#define M10V_XDDAC_DBL GENMASK(11, 8)
+
+#define M10V_XDDES_CE BIT(28)
+#define M10V_XDDES_SE BIT(24)
+#define M10V_XDDES_SA BIT(15)
+#define M10V_XDDES_TF GENMASK(23, 20)
+#define M10V_XDDES_EI BIT(1)
+#define M10V_XDDES_TI BIT(0)
+
+#define M10V_XDDSD_IS_MASK GENMASK(3, 0)
+#define M10V_XDDSD_IS_NORMAL 0x8
+
+#define MLB_XDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct milbeaut_xdmac_desc {
+ struct virt_dma_desc vd;
+ size_t len;
+ dma_addr_t src;
+ dma_addr_t dst;
+};
+
+struct milbeaut_xdmac_chan {
+ struct virt_dma_chan vc;
+ struct milbeaut_xdmac_desc *md;
+ void __iomem *reg_ch_base;
+};
+
+struct milbeaut_xdmac_device {
+ struct dma_device ddev;
+ void __iomem *reg_base;
+ struct milbeaut_xdmac_chan channels[];
+};
+
+static struct milbeaut_xdmac_chan *
+to_milbeaut_xdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct milbeaut_xdmac_chan, vc);
+}
+
+static struct milbeaut_xdmac_desc *
+to_milbeaut_xdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct milbeaut_xdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_xdmac_desc *
+milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&mc->vc);
+ if (!vd) {
+ mc->md = NULL;
+ return NULL;
+ }
+
+ list_del(&vd->node);
+
+ mc->md = to_milbeaut_xdmac_desc(vd);
+
+ return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
+ struct milbeaut_xdmac_desc *md)
+{
+ u32 val;
+
+ /* Setup the channel */
+ val = md->len - 1;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);
+
+ val = md->src;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);
+
+ val = md->dst;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);
+
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
+ val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
+ val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
+ FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);
+
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
+ val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
+ val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
+ FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);
+
+ /* Start the channel */
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
+ val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
+ M10V_XDDES_EI | M10V_XDDES_TI);
+ val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) |
+ FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) |
+ FIELD_PREP(M10V_XDDES_TI, 1);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
+{
+ struct milbeaut_xdmac_desc *md;
+
+ md = milbeaut_xdmac_next_desc(mc);
+ if (md)
+ milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
+{
+ struct milbeaut_xdmac_chan *mc = dev_id;
+ struct milbeaut_xdmac_desc *md;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&mc->vc.lock, flags);
+
+ /* Ack and Stop */
+ val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD);
+
+ md = mc->md;
+ if (!md)
+ goto out;
+
+ vchan_cookie_complete(&md->vd);
+
+ milbeaut_xdmac_start(mc);
+out:
+ spin_unlock_irqrestore(&mc->vc.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_desc *md;
+
+ md = kzalloc(sizeof(*md), GFP_NOWAIT);
+ if (!md)
+ return NULL;
+
+ md->len = len;
+ md->src = src;
+ md->dst = dst;
+
+ return vchan_tx_prep(vc, &md->vd, flags);
+}
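+
+/*
+ * Clients reach the memcpy path above through the generic dmaengine API,
+ * e.g. (a sketch; error checks omitted):
+ *
+ *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ */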
+
+static int milbeaut_xdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+ unsigned long flags;
+ u32 val;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ /* Halt the channel */
+ val = readl(mc->reg_ch_base + M10V_XDDES);
+ val &= ~M10V_XDDES_CE;
+ writel(val, mc->reg_ch_base + M10V_XDDES);
+
+ if (mc->md) {
+ vchan_terminate_vdesc(&mc->md->vd);
+ mc->md = NULL;
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return 0;
+}
+
+static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static void milbeaut_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !mc->md)
+ milbeaut_xdmac_start(mc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_milbeaut_xdmac_desc(vd));
+}
+
+static int milbeaut_xdmac_chan_init(struct platform_device *pdev,
+ struct milbeaut_xdmac_device *mdev,
+ int chan_id)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id];
+ char *irq_name;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, chan_id);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d",
+ chan_id);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt,
+ IRQF_SHARED, irq_name, mc);
+ if (ret)
+ return ret;
+
+ mc->reg_ch_base = mdev->reg_base + chan_id * 0x30;
+
+ mc->vc.desc_free = milbeaut_xdmac_desc_free;
+ vchan_init(&mc->vc, &mdev->ddev);
+
+ return 0;
+}
+
+static void enable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+ unsigned int val;
+
+ val = readl(mdev->reg_base + M10V_XDACS);
+ val |= M10V_XDACS_XE;
+ writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static void disable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+ unsigned int val;
+
+ val = readl(mdev->reg_base + M10V_XDACS);
+ val &= ~M10V_XDACS_XE;
+ writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static int milbeaut_xdmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_xdmac_device *mdev;
+ struct dma_device *ddev;
+ int nr_chans, ret, i;
+
+ nr_chans = platform_irq_count(pdev);
+ if (nr_chans < 0)
+ return nr_chans;
+
+ mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->reg_base))
+ return PTR_ERR(mdev->reg_base);
+
+ ddev = &mdev->ddev;
+ ddev->dev = dev;
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+ ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS;
+ ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources;
+ ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy;
+ ddev->device_terminate_all = milbeaut_xdmac_terminate_all;
+ ddev->device_synchronize = milbeaut_xdmac_synchronize;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = milbeaut_xdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++) {
+ ret = milbeaut_xdmac_chan_init(pdev, mdev, i);
+ if (ret)
+ return ret;
+ }
+
+ enable_xdmac(mdev);
+
+ ret = dma_async_device_register(ddev);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(dev->of_node,
+ of_dma_simple_xlate, mdev);
+ if (ret)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, mdev);
+
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(ddev);
+ return ret;
+}
+
+static int milbeaut_xdmac_remove(struct platform_device *pdev)
+{
+ struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+ * ->device_free_chan_resources() hook. However, each channel might
+ * still be holding one descriptor that was in flight at that moment.
+ * Terminate it to make sure this hardware is no longer running. Then,
+ * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ milbeaut_xdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&mdev->ddev);
+
+ disable_xdmac(mdev);
+
+ return 0;
+}
+
+static const struct of_device_id milbeaut_xdmac_match[] = {
+ { .compatible = "socionext,milbeaut-m10v-xdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
+
+static struct platform_driver milbeaut_xdmac_driver = {
+ .probe = milbeaut_xdmac_probe,
+ .remove = milbeaut_xdmac_remove,
+ .driver = {
+ .name = "milbeaut-m10v-xdmac",
+ .of_match_table = milbeaut_xdmac_match,
+ },
+};
+module_platform_driver(milbeaut_xdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 7fe494fc50d4..ad06f260e907 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -945,6 +945,8 @@ static int mmp_pdma_remove(struct platform_device *op)
struct mmp_pdma_phy *phy;
int i, irq = 0, irq_num = 0;
+ if (op->dev.of_node)
+ of_dma_controller_free(op->dev.of_node);
for (i = 0; i < pdev->dma_channels; i++) {
if (platform_get_irq(op, i) > 0)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e7d1e12bf464..10117f271b12 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -544,6 +544,9 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 90bbcef99ef8..023f951189a7 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1045,18 +1045,13 @@ static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct owl_dma *od;
- struct resource *res;
int ret, i, nr_channels, nr_requests;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- od->base = devm_ioremap_resource(&pdev->dev, res);
+ od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
new file mode 100644
index 000000000000..f8ffa02e279f
--- /dev/null
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -0,0 +1,6 @@
+config SF_PDMA
+ tristate "SiFive PDMA controller driver"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the SiFive PDMA controller.
diff --git a/drivers/dma/sf-pdma/Makefile b/drivers/dma/sf-pdma/Makefile
new file mode 100644
index 000000000000..764552ab8d0a
--- /dev/null
+++ b/drivers/dma/sf-pdma/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SF_PDMA) += sf-pdma.o
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
new file mode 100644
index 000000000000..465256fe8b1f
--- /dev/null
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ * SiFive FU540-C000 v1.0
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#include "sf-pdma.h"
+
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+ return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+ writel(lower_32_bits(v), addr);
+ writel(upper_32_bits(v), addr + 4);
+}
+#endif
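+
+/*
+ * Note: the fallbacks above split each 64-bit access into two 32-bit
+ * accesses, so they are not atomic with respect to a concurrent access
+ * to the same register.
+ */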
+
+static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
+{
+ return container_of(dchan, struct sf_pdma_chan, vchan.chan);
+}
+
+static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct sf_pdma_desc, vdesc);
+}
+
+static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
+{
+ struct sf_pdma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->desc && !chan->desc->in_use) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return chan->desc;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->chan = chan;
+
+ return desc;
+}
+
+static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
+ u64 dst, u64 src, u64 size)
+{
+ desc->xfer_type = PDMA_FULL_SPEED;
+ desc->xfer_size = size;
+ desc->dst_addr = dst;
+ desc->src_addr = src;
+}
+
+static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+
+ writel(PDMA_CLEAR_CTRL, regs->ctrl);
+}
+
+static struct dma_async_tx_descriptor *
+sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ struct sf_pdma_desc *desc;
+
+ if (chan && (!len || !dest || !src)) {
+ dev_err(chan->pdma->dma_dev.dev,
+ "Please check dma len, dest, src!\n");
+ return NULL;
+ }
+
+ desc = sf_pdma_alloc_desc(chan);
+ if (!desc)
+ return NULL;
+
+ desc->in_use = true;
+ desc->dirn = DMA_MEM_TO_MEM;
+ desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ chan->desc = desc;
+ sf_pdma_fill_desc(desc, dest, src, len);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return desc->async_tx;
+}
+
+static int sf_pdma_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *cfg)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ struct pdma_regs *regs = &chan->regs;
+
+ dma_cookie_init(dchan);
+ writel(PDMA_CLAIM_MASK, regs->ctrl);
+
+ return 0;
+}
+
+static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+
+ writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
+}
+
+static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ sf_pdma_disable_request(chan);
+ kfree(chan->desc);
+ chan->desc = NULL;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+ sf_pdma_disclaim_chan(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd = NULL;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+ u64 residue = 0;
+ struct sf_pdma_desc *desc;
+ struct dma_async_tx_descriptor *tx;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ /* chan->desc may already be gone, e.g. after a terminate_all */
+ if (!chan->desc)
+ goto out;
+
+ tx = &chan->desc->vdesc.tx;
+ if (cookie == tx->chan->completed_cookie)
+ goto out;
+
+ if (cookie == tx->cookie) {
+ residue = readq(regs->residue);
+ } else {
+ vd = vchan_find_desc(&chan->vchan, cookie);
+ if (!vd)
+ goto out;
+
+ desc = to_sf_pdma_desc(vd);
+ residue = desc->xfer_size;
+ }
+
+out:
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ return residue;
+}
+
+static enum dma_status
+sf_pdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ enum dma_status status;
+
+ status = dma_cookie_status(dchan, cookie, txstate);
+
+ if (txstate && status != DMA_ERROR)
+ dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));
+
+ return status;
+}
+
+static int sf_pdma_terminate_all(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ sf_pdma_disable_request(chan);
+ kfree(chan->desc);
+ chan->desc = NULL;
+ chan->xfer_err = false;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return 0;
+}
+
+static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+ u32 v;
+
+ v = PDMA_CLAIM_MASK |
+ PDMA_ENABLE_DONE_INT_MASK |
+ PDMA_ENABLE_ERR_INT_MASK |
+ PDMA_RUN_MASK;
+
+ writel(v, regs->ctrl);
+}
+
+static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
+{
+ struct sf_pdma_desc *desc = chan->desc;
+ struct pdma_regs *regs = &chan->regs;
+
+ if (!desc) {
+ dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
+ return;
+ }
+
+ writel(desc->xfer_type, regs->xfer_type);
+ writeq(desc->xfer_size, regs->xfer_size);
+ writeq(desc->dst_addr, regs->dst_addr);
+ writeq(desc->src_addr, regs->src_addr);
+
+ chan->desc = desc;
+ chan->status = DMA_IN_PROGRESS;
+ sf_pdma_enable_request(chan);
+}
+
+static void sf_pdma_issue_pending(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ sf_pdma_xfer_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct sf_pdma_desc *desc;
+
+ desc = to_sf_pdma_desc(vdesc);
+ desc->in_use = false;
+}
+
+static void sf_pdma_donebh_tasklet(unsigned long arg)
+{
+ struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_desc *desc = chan->desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->xfer_err) {
+ chan->retries = MAX_RETRY;
+ chan->status = DMA_COMPLETE;
+ chan->xfer_err = false;
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+}
+
+static void sf_pdma_errbh_tasklet(unsigned long arg)
+{
+ struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_desc *desc = chan->desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->retries <= 0) {
+ /* fail to recover */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+ } else {
+ /* retry */
+ chan->retries--;
+ chan->xfer_err = true;
+ chan->status = DMA_ERROR;
+
+ sf_pdma_enable_request(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+}
+
+static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
+{
+ struct sf_pdma_chan *chan = dev_id;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+ u64 residue;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
+ residue = readq(regs->residue);
+
+ if (!residue) {
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ } else {
+ /* submit the next transaction if possible */
+ struct sf_pdma_desc *desc = chan->desc;
+
+ desc->src_addr += desc->xfer_size - residue;
+ desc->dst_addr += desc->xfer_size - residue;
+ desc->xfer_size = residue;
+
+ sf_pdma_xfer_desc(chan);
+ }
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ tasklet_hi_schedule(&chan->done_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
+{
+ struct sf_pdma_chan *chan = dev_id;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ tasklet_schedule(&chan->err_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sf_pdma_irq_init() - Init PDMA IRQ handlers
+ * @pdev: pointer to the platform device
+ * @pdma: pointer to the PDMA engine; must be non-NULL
+ *
+ * Initialize the DONE and ERROR interrupt handlers for the 4 channels. The
+ * caller must ensure the pointers passed in are non-NULL. This function
+ * should be called only once, during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return:
+ * * 0 - all IRQ handlers initialized successfully
+ * * -EINVAL - failed to request an IRQ
+ */
+static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
+{
+ int irq, r, i;
+ struct sf_pdma_chan *chan;
+
+ for (i = 0; i < pdma->n_chans; i++) {
+ chan = &pdma->chans[i];
+
+ irq = platform_get_irq(pdev, i * 2);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
+ return -EINVAL;
+ }
+
+ r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
+ dev_name(&pdev->dev), (void *)chan);
+ if (r) {
+ dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
+ return -EINVAL;
+ }
+
+ chan->txirq = irq;
+
+ irq = platform_get_irq(pdev, (i * 2) + 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
+ return -EINVAL;
+ }
+
+ r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
+ dev_name(&pdev->dev), (void *)chan);
+ if (r) {
+ dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
+ return -EINVAL;
+ }
+
+ chan->errirq = irq;
+ }
+
+ return 0;
+}
+
+/**
+ * sf_pdma_setup_chans() - Init settings of each channel
+ * @pdma: pointer to the PDMA engine; must be non-NULL
+ *
+ * Initialize all channel data structures and register bases. The caller must
+ * ensure the pointer passed in is non-NULL. This function should be called
+ * only once, during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return: none
+ */
+static void sf_pdma_setup_chans(struct sf_pdma *pdma)
+{
+ int i;
+ struct sf_pdma_chan *chan;
+
+ INIT_LIST_HEAD(&pdma->dma_dev.channels);
+
+ for (i = 0; i < pdma->n_chans; i++) {
+ chan = &pdma->chans[i];
+
+ chan->regs.ctrl =
+ SF_PDMA_REG_BASE(i) + PDMA_CTRL;
+ chan->regs.xfer_type =
+ SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
+ chan->regs.xfer_size =
+ SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
+ chan->regs.dst_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
+ chan->regs.src_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
+ chan->regs.act_type =
+ SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
+ chan->regs.residue =
+ SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
+ chan->regs.cur_dst_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
+ chan->regs.cur_src_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;
+
+ chan->pdma = pdma;
+ chan->pm_state = RUNNING;
+ chan->slave_id = i;
+ chan->xfer_err = false;
+ spin_lock_init(&chan->lock);
+
+ chan->vchan.desc_free = sf_pdma_free_desc;
+ vchan_init(&chan->vchan, &pdma->dma_dev);
+
+ writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);
+
+ tasklet_init(&chan->done_tasklet,
+ sf_pdma_donebh_tasklet, (unsigned long)chan);
+ tasklet_init(&chan->err_tasklet,
+ sf_pdma_errbh_tasklet, (unsigned long)chan);
+ }
+}
+
+static int sf_pdma_probe(struct platform_device *pdev)
+{
+ struct sf_pdma *pdma;
+ struct resource *res;
+ int chans;
+ int ret;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
+ DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
+ DMA_SLAVE_BUSWIDTH_64_BYTES;
+
+ chans = PDMA_NR_CH;
+ /* struct sf_pdma embeds a fixed chans[PDMA_NR_CH] array */
+ pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
+ if (!pdma)
+ return -ENOMEM;
+
+ pdma->n_chans = chans;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdma->membase))
+ goto ERR_MEMBASE;
+
+ ret = sf_pdma_irq_init(pdev, pdma);
+ if (ret)
+ goto ERR_INITIRQ;
+
+ sf_pdma_setup_chans(pdma);
+
+ pdma->dma_dev.dev = &pdev->dev;
+
+ /* Setup capability */
+ dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
+ pdma->dma_dev.copy_align = 2;
+ pdma->dma_dev.src_addr_widths = widths;
+ pdma->dma_dev.dst_addr_widths = widths;
+ pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
+ pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ pdma->dma_dev.descriptor_reuse = true;
+
+ /* Setup DMA APIs */
+ pdma->dma_dev.device_alloc_chan_resources =
+ sf_pdma_alloc_chan_resources;
+ pdma->dma_dev.device_free_chan_resources =
+ sf_pdma_free_chan_resources;
+ pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
+ pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
+ pdma->dma_dev.device_config = sf_pdma_slave_config;
+ pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
+ pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;
+
+ platform_set_drvdata(pdev, pdma);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to set DMA mask. Fall back to default.\n");
+
+ ret = dma_async_device_register(&pdma->dma_dev);
+ if (ret)
+ goto ERR_REG_DMADEVICE;
+
+ return 0;
+
+ERR_MEMBASE:
+ ret = PTR_ERR(pdma->membase);
+ devm_kfree(&pdev->dev, pdma);
+ return ret;
+
+ERR_INITIRQ:
+ devm_kfree(&pdev->dev, pdma);
+ return ret;
+
+ERR_REG_DMADEVICE:
+ devm_kfree(&pdev->dev, pdma);
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform DMA. (%d)\n", ret);
+ return ret;
+}
+
+static int sf_pdma_remove(struct platform_device *pdev)
+{
+ struct sf_pdma *pdma = platform_get_drvdata(pdev);
+ struct sf_pdma_chan *ch;
+ int i;
+
+ for (i = 0; i < PDMA_NR_CH; i++) {
+ ch = &pdma->chans[i];
+
+ devm_free_irq(&pdev->dev, ch->txirq, ch);
+ devm_free_irq(&pdev->dev, ch->errirq, ch);
+ list_del(&ch->vchan.chan.device_node);
+ tasklet_kill(&ch->vchan.task);
+ tasklet_kill(&ch->done_tasklet);
+ tasklet_kill(&ch->err_tasklet);
+ }
+
+ dma_async_device_unregister(&pdma->dma_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sf_pdma_dt_ids[] = {
+ { .compatible = "sifive,fu540-c000-pdma" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
+
+static struct platform_driver sf_pdma_driver = {
+ .probe = sf_pdma_probe,
+ .remove = sf_pdma_remove,
+ .driver = {
+ .name = "sf-pdma",
+ .of_match_table = of_match_ptr(sf_pdma_dt_ids),
+ },
+};
+
+static int __init sf_pdma_init(void)
+{
+ return platform_driver_register(&sf_pdma_driver);
+}
+
+static void __exit sf_pdma_exit(void)
+{
+ platform_driver_unregister(&sf_pdma_driver);
+}
+
+/* do early init */
+subsys_initcall(sf_pdma_init);
+module_exit(sf_pdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SiFive Platform DMA driver");
+MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
new file mode 100644
index 000000000000..0c20167b097d
--- /dev/null
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ * SiFive FU540-C000 v1.0
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#ifndef _SF_PDMA_H
+#define _SF_PDMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define PDMA_NR_CH 4
+
+#if (PDMA_NR_CH != 4)
+#error "Please define PDMA_NR_CH to 4"
+#endif
+
+#define PDMA_BASE_ADDR 0x3000000
+#define PDMA_CHAN_OFFSET 0x1000
+
+/* Register Offset */
+#define PDMA_CTRL 0x000
+#define PDMA_XFER_TYPE 0x004
+#define PDMA_XFER_SIZE 0x008
+#define PDMA_DST_ADDR 0x010
+#define PDMA_SRC_ADDR 0x018
+#define PDMA_ACT_TYPE 0x104 /* Read-only */
+#define PDMA_REMAINING_BYTE 0x108 /* Read-only */
+#define PDMA_CUR_DST_ADDR 0x110 /* Read-only */
+#define PDMA_CUR_SRC_ADDR 0x118 /* Read-only */
+
+/* CTRL */
+#define PDMA_CLEAR_CTRL 0x0
+#define PDMA_CLAIM_MASK GENMASK(0, 0)
+#define PDMA_RUN_MASK GENMASK(1, 1)
+#define PDMA_ENABLE_DONE_INT_MASK GENMASK(14, 14)
+#define PDMA_ENABLE_ERR_INT_MASK GENMASK(15, 15)
+#define PDMA_DONE_STATUS_MASK GENMASK(30, 30)
+#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
+
+/* Transfer Type */
+#define PDMA_FULL_SPEED 0xFF000008
+
+/* Error Recovery */
+#define MAX_RETRY 1
+
+#define SF_PDMA_REG_BASE(ch) (pdma->membase + (PDMA_CHAN_OFFSET * (ch)))
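+/* e.g. channel 1's registers start at pdma->membase + 0x1000 */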
+
+struct pdma_regs {
+ /* read-write regs */
+ void __iomem *ctrl; /* 4 bytes */
+
+ void __iomem *xfer_type; /* 4 bytes */
+ void __iomem *xfer_size; /* 8 bytes */
+ void __iomem *dst_addr; /* 8 bytes */
+ void __iomem *src_addr; /* 8 bytes */
+
+ /* read-only */
+ void __iomem *act_type; /* 4 bytes */
+ void __iomem *residue; /* 8 bytes */
+ void __iomem *cur_dst_addr; /* 8 bytes */
+ void __iomem *cur_src_addr; /* 8 bytes */
+};
+
+struct sf_pdma_desc {
+ u32 xfer_type;
+ u64 xfer_size;
+ u64 dst_addr;
+ u64 src_addr;
+ struct virt_dma_desc vdesc;
+ struct sf_pdma_chan *chan;
+ bool in_use;
+ enum dma_transfer_direction dirn;
+ struct dma_async_tx_descriptor *async_tx;
+};
+
+enum sf_pdma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+
+struct sf_pdma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ enum sf_pdma_pm_state pm_state;
+ u32 slave_id;
+ struct sf_pdma *pdma;
+ struct sf_pdma_desc *desc;
+ struct dma_slave_config cfg;
+ u32 attr;
+ dma_addr_t dma_dev_addr;
+ u32 dma_dev_size;
+ struct tasklet_struct done_tasklet;
+ struct tasklet_struct err_tasklet;
+ struct pdma_regs regs;
+ spinlock_t lock; /* protect chan data */
+ bool xfer_err;
+ int txirq;
+ int errirq;
+ int retries;
+};
+
+struct sf_pdma {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *mappedbase;
+ u32 n_chans;
+ struct sf_pdma_chan chans[PDMA_NR_CH];
+};
+
+#endif /* _SF_PDMA_H */
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 3993ab65c62c..f06016d38a05 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -203,19 +203,27 @@ struct rcar_dmac {
unsigned int n_channels;
struct rcar_dmac_chan *channels;
- unsigned int channels_mask;
+ u32 channels_mask;
DECLARE_BITMAP(modules, 256);
};
#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
+/*
+ * struct rcar_dmac_of_data - This driver's OF data
+ * @chan_offset_base: DMAC channels base offset
+ * @chan_offset_stride: DMAC channels offset stride
+ */
+struct rcar_dmac_of_data {
+ u32 chan_offset_base;
+ u32 chan_offset_stride;
+};
+
/* -----------------------------------------------------------------------------
* Registers
*/
-#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
-
#define RCAR_DMAISTA 0x0020
#define RCAR_DMASEC 0x0030
#define RCAR_DMAOR 0x0060
@@ -1726,6 +1734,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
struct rcar_dmac_chan *rchan,
+ const struct rcar_dmac_of_data *data,
unsigned int index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
@@ -1735,7 +1744,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
int ret;
rchan->index = index;
- rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+ rchan->iomem = dmac->iomem + data->chan_offset_base +
+ data->chan_offset_stride * index;
rchan->mid_rid = -EINVAL;
spin_lock_init(&rchan->lock);
@@ -1800,7 +1810,15 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return -EINVAL;
}
+ /*
+ * If the driver cannot read the dma-channel-mask property,
+ * it assumes that all channels are usable.
+ */
dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+ of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
+
+ /* Clear any mask bits that refer to non-existent channels */
+ dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
return 0;
}
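The two statements above form a safe default-then-override sequence: an absent dma-channel-mask property leaves the all-channels default intact, and a present but over-wide mask is clamped to the channels that actually exist. A standalone sketch of the clamping, with GENMASK() redefined locally to mirror the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's GENMASK() */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int n_channels = 8;
	uint32_t channels_mask = 0xff0f;	/* DT value with stray high bits */

	channels_mask &= GENMASK(n_channels - 1, 0);	/* keep only real channels */
	printf("usable channels mask: 0x%x\n", channels_mask);	/* 0x0f */
	return 0;
}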
@@ -1813,10 +1831,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
struct dma_device *engine;
struct rcar_dmac *dmac;
- struct resource *mem;
+ const struct rcar_dmac_of_data *data;
unsigned int i;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
return -ENOMEM;
@@ -1848,8 +1870,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
@@ -1901,7 +1922,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (!(dmac->channels_mask & BIT(i)))
continue;
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
if (ret < 0)
goto error;
}
@@ -1948,8 +1969,16 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
rcar_dmac_stop_all_chan(dmac);
}
+static const struct rcar_dmac_of_data rcar_dmac_data = {
+ .chan_offset_base = 0x8000,
+ .chan_offset_stride = 0x80,
+};
+
static const struct of_device_id rcar_dmac_of_ids[] = {
- { .compatible = "renesas,rcar-dmac", },
+ {
+ .compatible = "renesas,rcar-dmac",
+ .data = &rcar_dmac_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 8546ad034720..9a31a315dbef 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -99,6 +99,7 @@
/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
+#define SPRD_DMA_WRAP_ADDR_MASK GENMASK(27, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET 4
/* SPRD_DMA_CHN_INTC register definition */
@@ -118,6 +119,8 @@
#define SPRD_DMA_SWT_MODE_OFFSET 26
#define SPRD_DMA_REQ_MODE_OFFSET 24
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
+#define SPRD_DMA_WRAP_SEL_DEST BIT(23)
+#define SPRD_DMA_WRAP_EN BIT(22)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
#define SPRD_DMA_LLIST_END BIT(19)
@@ -804,6 +807,8 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
+ temp |= schan->linklist.wrap_addr ?
+ SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0;
temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
hw->frg_len = temp;
@@ -831,6 +836,12 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
hw->llist_ptr = lower_32_bits(llist_ptr);
hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
SPRD_DMA_LLIST_HIGH_MASK;
+
+ if (schan->linklist.wrap_addr) {
+ hw->wrap_ptr |= schan->linklist.wrap_addr &
+ SPRD_DMA_WRAP_ADDR_MASK;
+ hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK;
+ }
} else {
hw->llist_ptr = 0;
hw->src_blk_step = 0;
@@ -939,9 +950,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
schan->linklist.phy_addr = ll_cfg->phy_addr;
schan->linklist.virt_addr = ll_cfg->virt_addr;
+ schan->linklist.wrap_addr = ll_cfg->wrap_addr;
} else {
schan->linklist.phy_addr = 0;
schan->linklist.virt_addr = 0;
+ schan->linklist.wrap_addr = 0;
}
/*
@@ -1080,7 +1093,6 @@ static int sprd_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct sprd_dma_dev *sdev;
struct sprd_dma_chn *dma_chn;
- struct resource *res;
u32 chn_count;
int ret, i;
@@ -1126,8 +1138,7 @@ static int sprd_dma_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+ sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->glb_base))
return PTR_ERR(sdev->glb_base);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ba7c4f07fcd6..756a3c951dc7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -260,6 +260,13 @@ struct edma_cc {
*/
unsigned long *slot_inuse;
+ /*
+ * Tracks channels reserved for use by the DSP.
+ * If a bit is cleared, the channel is allocated to the DSP and
+ * Linux must not touch it.
+ */
+ unsigned long *channels_mask;
+
struct dma_device dma_slave;
struct dma_device *dma_memcpy;
struct edma_chan *slave_chans;
@@ -716,6 +723,12 @@ static int edma_alloc_channel(struct edma_chan *echan,
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ if (!test_bit(echan->ch_num, ecc->channels_mask)) {
+ dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
+ echan->ch_num);
+ return -EINVAL;
+ }
+
/* ensure access through shadow region 0 */
edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
EDMA_CHANNEL_BIT(channel));
@@ -2249,10 +2262,8 @@ static int edma_probe(struct platform_device *pdev)
{
struct edma_soc_info *info = pdev->dev.platform_data;
s8 (*queue_priority_mapping)[2];
- int i, off;
- const s16 (*rsv_slots)[2];
- const s16 (*xbar_chans)[2];
- int irq;
+ const s16 (*reserved)[2];
+ int i, irq;
char *irq_name;
struct resource *mem;
struct device_node *node = pdev->dev.of_node;
@@ -2331,15 +2342,32 @@ static int edma_probe(struct platform_device *pdev)
if (!ecc->slot_inuse)
return -ENOMEM;
+ ecc->channels_mask = devm_kcalloc(dev,
+ BITS_TO_LONGS(ecc->num_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!ecc->channels_mask)
+ return -ENOMEM;
+
+ /* Mark all channels available initially */
+ bitmap_fill(ecc->channels_mask, ecc->num_channels);
+
ecc->default_queue = info->default_queue;
if (info->rsv) {
/* Set the reserved slots in inuse list */
- rsv_slots = info->rsv->rsv_slots;
- if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++)
- bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
- rsv_slots[i][1]);
+ reserved = info->rsv->rsv_slots;
+ if (reserved) {
+ for (i = 0; reserved[i][0] != -1; i++)
+ bitmap_set(ecc->slot_inuse, reserved[i][0],
+ reserved[i][1]);
+ }
+
+ /* Clear channels not usable for Linux */
+ reserved = info->rsv->rsv_chans;
+ if (reserved) {
+ for (i = 0; reserved[i][0] != -1; i++)
+ bitmap_clear(ecc->channels_mask, reserved[i][0],
+ reserved[i][1]);
}
}
@@ -2349,14 +2377,6 @@ static int edma_probe(struct platform_device *pdev)
edma_write_slot(ecc, i, &dummy_paramset);
}
- /* Clear the xbar mapped channels in unused list */
- xbar_chans = info->xbar_chans;
- if (xbar_chans) {
- for (i = 0; xbar_chans[i][1] != -1; i++) {
- off = xbar_chans[i][1];
- }
- }
-
irq = platform_get_irq_byname(pdev, "edma3_ccint");
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
@@ -2399,12 +2419,15 @@ static int edma_probe(struct platform_device *pdev)
if (!ecc->legacy_mode) {
int lowest_priority = 0;
+ unsigned int array_max;
struct of_phandle_args tc_args;
ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
sizeof(*ecc->tc_list), GFP_KERNEL);
- if (!ecc->tc_list)
- return -ENOMEM;
+ if (!ecc->tc_list) {
+ ret = -ENOMEM;
+ goto err_reg1;
+ }
for (i = 0;; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
@@ -2420,6 +2443,18 @@ static int edma_probe(struct platform_device *pdev)
info->default_queue = i;
}
}
+
+ /* See if we have an optional dma-channel-mask array */
+ array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
+ ret = of_property_read_variable_u32_array(node,
+ "dma-channel-mask",
+ (u32 *)ecc->channels_mask,
+ 1, array_max);
+ if (ret > 0 && ret != array_max)
+ dev_warn(dev, "dma-channel-mask is not complete.\n");
+ else if (ret == -EOVERFLOW || ret == -ENODATA)
+ dev_warn(dev,
+ "dma-channel-mask is out of range or empty\n");
}
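Putting the hunks together: channels_mask starts with every bit set, DSP-reserved ranges are cleared out of it, and edma_alloc_channel() later rejects any channel whose bit is clear. A userspace sketch of the same bookkeeping with a plain bitmap (the reserved ranges are invented for illustration):

#include <stdio.h>

#define NUM_CHANNELS 32

int main(void)
{
	unsigned long mask = ~0ul;		/* bitmap_fill(): all usable */
	/* rsv_chans-style {start, count} pairs, -1 terminated (hypothetical) */
	int reserved[][2] = { {4, 2}, {10, 1}, {-1, -1} };

	for (int i = 0; reserved[i][0] != -1; i++)	/* bitmap_clear() */
		for (int j = 0; j < reserved[i][1]; j++)
			mask &= ~(1ul << (reserved[i][0] + j));

	for (int ch = 0; ch < NUM_CHANNELS; ch++)
		if (!(mask & (1ul << ch)))
			printf("channel %d reserved for DSP\n", ch);
	return 0;
}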
/* Event queue priority mapping */
@@ -2437,6 +2472,10 @@ static int edma_probe(struct platform_device *pdev)
edma_dma_init(ecc, legacy_mode);
for (i = 0; i < ecc->num_channels; i++) {
+ /* Do not touch reserved channels */
+ if (!test_bit(i, ecc->channels_mask))
+ continue;
+
/* Assign all channels to the default queue */
edma_assign_channel_eventq(&ecc->slave_chans[i],
info->default_queue);
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index fde54687856b..21b8f1131d55 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -382,7 +382,6 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_mdmac_device *mdev;
struct dma_device *ddev;
- struct resource *res;
int nr_chans, ret, i;
nr_chans = platform_irq_count(pdev);
@@ -398,8 +397,7 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->reg_base = devm_ioremap_resource(dev, res);
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->reg_base))
return PTR_ERR(mdev->reg_base);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5d56f1e4d332..a9c5d5cc9f2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -25,6 +25,12 @@
* The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
* Access (DMA) between a memory-mapped source address and a memory-mapped
* destination address.
+ *
+ * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
+ * Xilinx IP that provides high-bandwidth direct memory access between
+ * memory and AXI4-Stream target peripherals. It provides a scatter-gather
+ * (SG) interface and supports independent configuration of multiple channels.
+ *
*/
#include <linux/bitops.h>
@@ -173,18 +179,6 @@
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
-/* Multi-Channel DMA Descriptor offsets*/
-#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
-#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
-
-/* Multi-Channel DMA Masks/Shifts */
-#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
-#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
-#define XILINX_DMA_BD_STRIDE_SHIFT 0
-#define XILINX_DMA_BD_VSIZE_SHIFT 19
-
/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -194,6 +188,31 @@
#define xilinx_prep_dma_addr_t(addr) \
((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
+
+/* AXI MCDMA Specific Registers/Offsets */
+#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
+#define XILINX_MCDMA_CHEN_OFFSET 0x0008
+#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
+#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
+#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
+#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
+
+/* AXI MCDMA Specific Masks/Shifts */
+#define XILINX_MCDMA_COALESCE_SHIFT 16
+#define XILINX_MCDMA_COALESCE_MAX 24
+#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
+#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
+#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
+#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
+#define XILINX_MCDMA_BD_EOP BIT(30)
+#define XILINX_MCDMA_BD_SOP BIT(31)
+
/**
* struct xilinx_vdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
@@ -221,8 +240,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,14 +251,38 @@ struct xilinx_axidma_desc_hw {
u32 next_desc_msb;
u32 buf_addr;
u32 buf_addr_msb;
- u32 mcdma_control;
- u32 vsize_stride;
+ u32 reserved1;
+ u32 reserved2;
u32 control;
u32 status;
u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
+ * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @rsvd: Reserved field @0x10
+ * @control: Control Information field @0x14
+ * @status: Status field @0x18
+ * @sideband_status: Status of sideband signals @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_aximcdma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 rsvd;
+ u32 control;
+ u32 status;
+ u32 sideband_status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
* @next_desc_msb: Next Descriptor Pointer MSB @0x04
@@ -286,6 +329,18 @@ struct xilinx_axidma_tx_segment {
} __aligned(64);
/**
+ * struct xilinx_aximcdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_aximcdma_tx_segment {
+ struct xilinx_aximcdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
* struct xilinx_cdma_tx_segment - Descriptor segment
* @hw: Hardware descriptor
* @node: Node in the descriptor segments list
@@ -303,12 +358,16 @@ struct xilinx_cdma_tx_segment {
* @segments: TX segments list
* @node: Node in the channel descriptors list
* @cyclic: Check for cyclic transfers.
+ * @err: Whether the descriptor has an error.
+ * @residue: Residue of the completed descriptor
*/
struct xilinx_dma_tx_descriptor {
struct dma_async_tx_descriptor async_tx;
struct list_head segments;
struct list_head node;
bool cyclic;
+ bool err;
+ u32 residue;
};
/**
@@ -339,8 +398,8 @@ struct xilinx_dma_tx_descriptor {
* @desc_pendingcount: Descriptor pending count
* @ext_addr: Indicates 64 bit addressing is supported by dma channel
* @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_mv: Statically allocated segments base for MCDMA
* @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
* @cyclic_seg_p: Physical allocated segments base for cyclic dma
@@ -376,8 +435,8 @@ struct xilinx_dma_chan {
u32 desc_pendingcount;
bool ext_addr;
u32 desc_submitcount;
- u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ struct xilinx_aximcdma_tx_segment *seg_mv;
dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
dma_addr_t cyclic_seg_p;
@@ -393,12 +452,14 @@ struct xilinx_dma_chan {
* @XDMA_TYPE_AXIDMA: Axi dma ip.
* @XDMA_TYPE_CDMA: Axi cdma ip.
* @XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
*
*/
enum xdma_ip_type {
XDMA_TYPE_AXIDMA = 0,
XDMA_TYPE_CDMA,
XDMA_TYPE_VDMA,
+ XDMA_TYPE_AXIMCDMA
};
struct xilinx_dma_config {
@@ -406,6 +467,7 @@ struct xilinx_dma_config {
int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
struct clk **tx_clk, struct clk **txs_clk,
struct clk **rx_clk, struct clk **rxs_clk);
+ irqreturn_t (*irq_handler)(int irq, void *data);
};
/**
@@ -414,7 +476,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
* @pdev: Platform device structure pointer
@@ -427,13 +488,13 @@ struct xilinx_dma_config {
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
* @max_buffer_len: Max buffer length
+ * @s2mm_index: S2MM channel index
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -446,6 +507,7 @@ struct xilinx_dma_device {
u32 nr_channels;
u32 chan_id;
u32 max_buffer_len;
+ u32 s2mm_index;
};
/* Macros */
@@ -546,6 +608,18 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
}
}
+static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
+ } else {
+ hw->buf_addr = buf_addr + sg_used;
+ }
+}
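xilinx_aximcdma_buf() programs a 64-bit buffer address as two 32-bit descriptor words when the channel supports extended addressing; otherwise the address is truncated to a single word. The split is plain shift-and-truncate, shown here with local stand-ins for the kernel's lower_32_bits()/upper_32_bits():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t buf_addr = 0x00000001234f8000ull;	/* illustrative DMA address */
	size_t sg_used = 0x40;

	/* ext_addr case: program both halves of the descriptor */
	printf("buf_addr     = 0x%08x\n", lower_32_bits(buf_addr + sg_used));
	printf("buf_addr_msb = 0x%08x\n", upper_32_bits(buf_addr + sg_used));
	return 0;
}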
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
@@ -613,6 +687,33 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
}
spin_unlock_irqrestore(&chan->lock, flags);
+ if (!segment)
+ dev_dbg(chan->dev, "Could not find free tx segment\n");
+
+ return segment;
+}
+
+/**
+ * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_aximcdma_tx_segment *
+xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
return segment;
}
@@ -627,6 +728,17 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
hw->next_desc_msb = next_desc_msb;
}
+static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -641,6 +753,20 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
}
/**
+ * xilinx_mcdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_tx_segment *
+ segment)
+{
+ xilinx_mcdma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
+}
+
+/**
* xilinx_cdma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
* @segment: DMA transaction segment
@@ -694,6 +820,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
struct xilinx_vdma_tx_segment *segment, *next;
struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+ struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
if (!desc)
return;
@@ -709,12 +836,18 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
list_del(&cdma_segment->node);
xilinx_cdma_free_tx_segment(chan, cdma_segment);
}
- } else {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
list_for_each_entry_safe(axidma_segment, axidma_next,
&desc->segments, node) {
list_del(&axidma_segment->node);
xilinx_dma_free_tx_segment(chan, axidma_segment);
}
+ } else {
+ list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
+ &desc->segments, node) {
+ list_del(&aximcdma_segment->node);
+ xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
+ }
}
kfree(desc);
@@ -783,10 +916,61 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
chan->cyclic_seg_v, chan->cyclic_seg_p);
}
- if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS, chan->seg_mv,
+ chan->seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
}
+
+}
+
+/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_cdma_tx_segment *cdma_seg;
+ struct xilinx_axidma_tx_segment *axidma_seg;
+ struct xilinx_cdma_desc_hw *cdma_hw;
+ struct xilinx_axidma_desc_hw *axidma_hw;
+ struct list_head *entry;
+ u32 residue = 0;
+
+ list_for_each(entry, &desc->segments) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_seg = list_entry(entry,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_hw = &cdma_seg->hw;
+ residue += (cdma_hw->control - cdma_hw->status) &
+ chan->xdev->max_buffer_len;
+ } else {
+ axidma_seg = list_entry(entry,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_hw = &axidma_seg->hw;
+ residue += (axidma_hw->control - axidma_hw->status) &
+ chan->xdev->max_buffer_len;
+ }
+ }
+
+ return residue;
}
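For every segment, hw->control holds the programmed length and the hardware writes the transferred byte count into the low bits of hw->status, so (control - status) masked to the length field is the per-segment shortfall; the helper simply sums those. A standalone sketch of the accumulation (segment values and mask width are invented for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_buffer_len = (1u << 23) - 1;	/* illustrative length-field mask */
	/* {control (programmed), status (transferred)} per segment */
	uint32_t segs[][2] = { {4096, 4096}, {4096, 4096}, {4096, 1024} };
	uint32_t residue = 0;

	for (int i = 0; i < 3; i++)
		residue += (segs[i][0] - segs[i][1]) & max_buffer_len;

	printf("residue = %u bytes\n", residue);	/* 3072 */
	return 0;
}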
/**
@@ -823,7 +1007,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -833,14 +1017,22 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Remove from the list of running transactions */
list_del(&desc->node);
- /* Run the link descriptor callback function */
- dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock_irqsave(&chan->lock, flags);
+ if (unlikely(desc->err)) {
+ if (chan->direction == DMA_DEV_TO_MEM)
+ result.result = DMA_TRANS_READ_FAILED;
+ else
+ result.result = DMA_TRANS_WRITE_FAILED;
+ } else {
+ result.result = DMA_TRANS_NOERROR;
}
+ result.residue = desc->residue;
+
+ /* Run the link descriptor callback function */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
+ spin_lock_irqsave(&chan->lock, flags);
+
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
@@ -922,6 +1114,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
list_add_tail(&chan->seg_v[i].node,
&chan->free_seg_list);
}
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ /* Allocate the buffer descriptors. */
+ chan->seg_mv = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_mv) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_mv[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_mv[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
@@ -937,7 +1153,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
}
if (!chan->desc_pool &&
- (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
+ ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -1003,8 +1220,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
enum dma_status ret;
unsigned long flags;
u32 residue = 0;
@@ -1013,23 +1228,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE || !txstate)
return ret;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->has_sg) {
- list_for_each_entry(segment, &desc->segments, node) {
- hw = &segment->hw;
- residue += (hw->control - hw->status) &
- chan->xdev->max_buffer_len;
- }
- }
- spin_unlock_irqrestore(&chan->lock, flags);
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ /*
+ * VDMA and simple mode do not support residue reporting, so the
+ * residue field will always be 0.
+ */
+ if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+ residue = xilinx_dma_get_residue(chan, desc);
- chan->residue = residue;
- dma_set_residue(txstate, chan->residue);
- }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dma_set_residue(txstate, residue);
return ret;
}
@@ -1301,53 +1513,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg && !chan->xdev->mcdma)
+ if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
- if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_CDESC(chan->tdest),
- head_desc->async_tx.phys);
- }
- }
- }
-
xilinx_dma_start(chan);
if (chan->err)
return;
/* Start the transfer */
- if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->has_sg) {
if (chan->cyclic)
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
chan->cyclic_seg_v->phys);
else
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
- } else if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_TDESC(chan->tdest),
- tail_segment->phys);
- }
- }
} else {
struct xilinx_axidma_tx_segment *segment;
struct xilinx_axidma_desc_hw *hw;
@@ -1371,6 +1553,76 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
}
/**
+ * xilinx_mcdma_start_transfer - Starts MCDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_axidma_tx_segment *tail_segment;
+ u32 reg;
+
+ /*
+ * The lock is already held by the calling functions, so we
+ * don't need to take it here again.
+ */
+
+ if (chan->err)
+ return;
+
+ if (!chan->idle)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+
+ if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
+ reg &= ~XILINX_MCDMA_COALESCE_MASK;
+ reg |= chan->desc_pendingcount <<
+ XILINX_MCDMA_COALESCE_SHIFT;
+ }
+
+ reg |= XILINX_MCDMA_IRQ_ALL_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ /* Program current descriptor */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
+ head_desc->async_tx.phys);
+
+ /* Program channel enable register */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
+ reg |= BIT(chan->tdest);
+ dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
+
+ /* Start the fetch of BDs for the channel */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+ reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
+ tail_segment->phys);
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ chan->idle = false;
+}
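The coalescing setup above is a read-modify-write: bits 23:16 of the channel control register hold the interrupt-coalescing count, so the stale field is cleared and desc_pendingcount inserted before the interrupt enables are OR-ed in. The same packing on a plain variable:

#include <stdio.h>
#include <stdint.h>

#define COALESCE_SHIFT 16
#define COALESCE_MASK  (0xffu << COALESCE_SHIFT)	/* GENMASK(23, 16) */
#define IRQ_ALL_MASK   (0x7u << 5)			/* GENMASK(7, 5) */

int main(void)
{
	uint32_t reg = 0x00aa0000;	/* stale coalesce count of 0xaa */
	uint32_t pending = 4;

	reg &= ~COALESCE_MASK;
	reg |= pending << COALESCE_SHIFT;
	reg |= IRQ_ALL_MASK;		/* enable IOC, delay and error IRQs */

	printf("CR = 0x%08x\n", reg);	/* 0x000400e0 */
	return 0;
}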
+
+/**
* xilinx_dma_issue_pending - Issue pending transactions
* @dchan: DMA channel
*/
@@ -1399,6 +1651,13 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->has_sg && chan->xdev->dma_config->dmatype !=
+ XDMA_TYPE_VDMA)
+ desc->residue = xilinx_dma_get_residue(chan, desc);
+ else
+ desc->residue = 0;
+ desc->err = chan->err;
+
list_del(&desc->node);
if (!desc->cyclic)
dma_cookie_complete(&desc->async_tx);
@@ -1433,6 +1692,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
chan->err = false;
chan->idle = true;
+ chan->desc_pendingcount = 0;
chan->desc_submitcount = 0;
return err;
@@ -1461,6 +1721,74 @@ static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
}
/**
+ * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx MCDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dma_chan *chan = data;
+ u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
+ else
+ ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
+
+ /* Read the ID of the channel that raised the interrupt */
+ chan_sermask = dma_ctrl_read(chan, ser_offset);
+ chan_id = ffs(chan_sermask);
+
+ if (!chan_id)
+ return IRQ_NONE;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ chan_offset = chan->xdev->s2mm_index;
+
+ chan_offset = chan_offset + (chan_id - 1);
+ chan = chan->xdev->chan[chan_offset];
+ /* Read the status and ack the interrupts. */
+ status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
+ if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
+ status & XILINX_MCDMA_IRQ_ALL_MASK);
+
+ if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
+ dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
+ chan,
+ dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
+ (chan->tdest)),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
+ (chan->tdest)));
+ chan->err = true;
+ }
+
+ if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
+ /*
+ * The device is taking too long to complete the transfer
+ * even though responsiveness is required.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
+ spin_lock(&chan->lock);
+ xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
+ chan->start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
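The demultiplexing step at the top of this handler is worth spelling out: the service register's lowest set bit names the signalling hardware channel (1-based via ffs(), 0 meaning no bit set), and DEV_TO_MEM channels are additionally offset by s2mm_index into xdev->chan[]. A standalone sketch using the C library's ffs(), which shares the kernel's 1-based convention (the s2mm_index value is hypothetical):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int sermask = 0x0c;	/* channels 2 and 3 pending */
	unsigned int s2mm_index = 8;	/* hypothetical first S2MM slot */
	int chan_id = ffs(sermask);	/* 1-based; 0 means no bit set */

	if (!chan_id)
		return 0;		/* IRQ_NONE equivalent */

	printf("hw channel %d -> xdev->chan[%d]\n",
	       chan_id - 1, s2mm_index + chan_id - 1);
	return 0;
}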
+
+/**
* xilinx_dma_irq_handler - DMA Interrupt handler
* @irq: IRQ number
* @data: Pointer to the Xilinx DMA channel structure
@@ -1967,31 +2295,32 @@ error:
}
/**
- * xilinx_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
+ * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
- * @xt: Interleaved template pointer
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
* @flags: transfer ack flags
+ * @context: APP words of the descriptor
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *
-xilinx_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
+xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
-
- if (!is_slave_direction(xt->dir))
- return NULL;
-
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
- if (xt->frame_size != 1)
+ if (!is_slave_direction(direction))
return NULL;
/* Allocate a transaction descriptor. */
@@ -1999,54 +2328,67 @@ xilinx_dma_prep_interleaved(struct dma_chan *dchan,
if (!desc)
return NULL;
- chan->direction = xt->dir;
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
- hw = &segment->hw;
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_aximcdma_desc_hw *hw;
- /* Fill in the descriptor */
- if (xt->dir != DMA_MEM_TO_DEV)
- hw->buf_addr = xt->dst_start;
- else
- hw->buf_addr = xt->src_start;
+ /* Get a free segment */
+ segment = xilinx_aximcdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
- hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
- hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
- XILINX_DMA_BD_VSIZE_MASK;
- hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
- XILINX_DMA_BD_STRIDE_MASK;
- hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ chan->xdev->max_buffer_len);
+ hw = &segment->hw;
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
+ /* Fill in the descriptor */
+ xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
+ sg_used);
+ hw->control = copy;
+ if (chan->direction == DMA_MEM_TO_DEV && app_w) {
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ sg_used += copy;
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
+ struct xilinx_aximcdma_tx_segment, node);
desc->async_tx.phys = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
- if (xt->dir == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_DMA_BD_SOP;
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_MCDMA_BD_SOP;
segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
+ struct xilinx_aximcdma_tx_segment,
node);
- segment->hw.control |= XILINX_DMA_BD_EOP;
+ segment->hw.control |= XILINX_MCDMA_BD_EOP;
}
return &desc->async_tx;
error:
xilinx_dma_free_tx_descriptor(chan, desc);
+
return NULL;
}
@@ -2194,7 +2536,9 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+ err);
return err;
}
@@ -2259,14 +2603,18 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
+ err);
return err;
}
*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
if (IS_ERR(*dev_clk)) {
err = PTR_ERR(*dev_clk);
- dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
+ err);
return err;
}
@@ -2299,7 +2647,9 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+ err);
return err;
}
@@ -2321,7 +2671,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
+ err);
return err;
}
@@ -2454,6 +2805,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
+ xdev->s2mm_index = xdev->nr_channels;
chan->tdest = chan_id - xdev->nr_channels;
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
@@ -2463,7 +2815,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
XILINX_VDMA_ENABLE_VERTICAL_FLIP;
}
- chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
+ else
+ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
chan->config.park = 1;
@@ -2478,9 +2834,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/* Request the interrupt */
- chan->irq = irq_of_parse_and_map(node, 0);
- err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
- "xilinx-dma-controller", chan);
+ chan->irq = irq_of_parse_and_map(node, chan->tdest);
+ err = request_irq(chan->irq, xdev->dma_config->irq_handler,
+ IRQF_SHARED, "xilinx-dma-controller", chan);
if (err) {
dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
return err;
@@ -2489,6 +2845,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
chan->start_transfer = xilinx_dma_start_transfer;
chan->stop_transfer = xilinx_dma_stop_transfer;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ chan->start_transfer = xilinx_mcdma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->start_transfer = xilinx_cdma_start_transfer;
chan->stop_transfer = xilinx_cdma_stop_transfer;
@@ -2545,7 +2904,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
- if ((ret < 0) && xdev->mcdma)
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
dev_warn(xdev->dev, "missing dma-channels property\n");
for (i = 0; i < nr_channels; i++)
@@ -2578,22 +2937,31 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
static const struct xilinx_dma_config axidma_config = {
.dmatype = XDMA_TYPE_AXIDMA,
.clk_init = axidma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
+static const struct xilinx_dma_config aximcdma_config = {
+ .dmatype = XDMA_TYPE_AXIMCDMA,
+ .clk_init = axidma_clk_init,
+ .irq_handler = xilinx_mcdma_irq_handler,
+};
static const struct xilinx_dma_config axicdma_config = {
.dmatype = XDMA_TYPE_CDMA,
.clk_init = axicdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
static const struct xilinx_dma_config axivdma_config = {
.dmatype = XDMA_TYPE_VDMA,
.clk_init = axivdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
static const struct of_device_id xilinx_dma_of_ids[] = {
{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+ { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
@@ -2612,7 +2980,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
- struct resource *io;
u32 num_frames, addr_width, len_width;
int i, err;
@@ -2638,16 +3005,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return err;
/* Request and map I/O memory */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ xdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->regs))
return PTR_ERR(xdev->regs);
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
+ xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
if (!of_property_read_u32(node, "xlnx,sg-length-width",
&len_width)) {
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2712,14 +3078,17 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
- xdev->common.device_prep_interleaved_dma =
- xilinx_dma_prep_interleaved;
- /* Residue calculation is supported by only AXI DMA */
+ /* Residue calculation is supported only by AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ /* Residue calculation is supported only by AXI DMA and CDMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
} else {
xdev->common.device_prep_interleaved_dma =
xilinx_vdma_dma_prep_interleaved;
@@ -2755,6 +3124,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
else
dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
index 9f4436f7c914..5fe2e8b9a7b8 100644
--- a/drivers/dma/zx_dma.c
+++ b/drivers/dma/zx_dma.c
@@ -754,18 +754,13 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
static int zx_dma_probe(struct platform_device *op)
{
struct zx_dma_dev *d;
- struct resource *iores;
int i, ret = 0;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
- d->base = devm_ioremap_resource(&op->dev, iores);
+ d->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
@@ -894,7 +889,6 @@ static int zx_dma_remove(struct platform_device *op)
list_del(&c->vc.chan.device_node);
}
clk_disable_unprepare(d->clk);
- dmam_pool_destroy(d->pool);
return 0;
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index c777088f5828..6e291d8f3a27 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1686,7 +1686,8 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (ret < 0)
goto fail;
- ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+ ret = vm_map_pages_zero(vma, client->buffer.pages,
+ client->buffer.page_count);
if (ret < 0)
goto fail;
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index df8a56a979b9..185b0b78b3d6 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -91,13 +91,6 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
}
EXPORT_SYMBOL(fw_iso_buffer_init);
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
- struct vm_area_struct *vma)
-{
- return vm_map_pages_zero(vma, buffer->pages,
- buffer->page_count);
-}
-
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0f0bed3a4bbb..4b0e4ee655a1 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -158,8 +158,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
- struct vm_area_struct *vma);
/* -topology */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 522f3addb5bd..33269316f111 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1752,7 +1752,7 @@ static u32 update_bus_time(struct fw_ohci *ohci)
if (unlikely(!ohci->bus_time_running)) {
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
- ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
+ ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
(cycle_time_seconds & 0x40);
ohci->bus_time_running = true;
}
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 92f843eaf1e0..7a30952b463d 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -135,8 +135,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
return NULL;
id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
- if (id < 0)
- goto free_mem;
+ if (id < 0) {
+ kfree(scmi_dev);
+ return NULL;
+ }
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
@@ -154,8 +156,6 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
put_dev:
put_device(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, id);
-free_mem:
- kfree(scmi_dev);
return NULL;
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 4a8012e3cb8c..601af4edad5e 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -323,7 +323,7 @@ static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
if (db->mask)
val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set, db->addr);
+ iowrite64_hi_lo(db->set | val, db->addr);
}
#endif
}
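The one-line fix turns the doorbell into a read-modify-write: the bits selected by db->mask are read back and OR-ed into the set value, so ringing the doorbell no longer clobbers neighbouring fields that share the register. A before/after sketch on a plain 64-bit value (all values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg  = 0x00000000f0f0f0f0ull;	/* current register contents */
	uint64_t mask = 0x00000000ffff0000ull;	/* bits that must be preserved */
	uint64_t set  = 0x0000000000000001ull;	/* the doorbell bit */

	uint64_t val = reg & mask;	/* read back the preserved bits */
	uint64_t out = set | val;	/* the old code wrote just 'set' */

	printf("written: 0x%016llx\n", (unsigned long long)out);
	return 0;
}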
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 1e21fc3e9851..2045566d622f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -35,6 +35,7 @@ static struct dmi_memdev_info {
const char *bank;
u64 size; /* bytes */
u16 handle;
+ u8 type; /* DDR2, DDR3, DDR4 etc */
} *dmi_memdev;
static int dmi_memdev_nr;
@@ -391,7 +392,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
u64 bytes;
u16 size;
- if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x13)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -400,6 +401,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
dmi_memdev[nr].handle = get_unaligned(&dm->handle);
dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ dmi_memdev[nr].type = d[0x12];
size = get_unaligned((u16 *)&d[0xC]);
if (size == 0)
@@ -1128,3 +1130,40 @@ u64 dmi_memdev_size(u16 handle)
return ~0ull;
}
EXPORT_SYMBOL_GPL(dmi_memdev_size);
+
+/**
+ * dmi_memdev_type - get the memory type
+ * @handle: DMI structure handle
+ *
+ * Return the DMI memory type of the module in the slot associated with the
+ * given DMI handle, or 0x0 if no such DMI handle exists.
+ */
+u8 dmi_memdev_type(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].type;
+ }
+ }
+ return 0x0; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_type);
+
+/**
+ * dmi_memdev_handle - get the DMI handle of a memory slot
+ * @slot: slot number
+ *
+ * Return the DMI handle associated with a given memory slot, or %0xFFFF
+ * if there is no such slot.
+ */
+u16 dmi_memdev_handle(int slot)
+{
+ if (dmi_memdev && slot >= 0 && slot < dmi_memdev_nr)
+ return dmi_memdev[slot].handle;
+
+ return 0xffff; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_handle);
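Together with the existing dmi_memdev_size(), these helpers let a caller walk DIMM slots by index and resolve each one to its SMBIOS memory type. A self-contained mock of the lookup logic (the table contents are invented; 0x1a is the SMBIOS code for DDR4):

#include <stdio.h>
#include <stdint.h>

struct memdev { uint16_t handle; uint8_t type; };

/* Mock of the parsed SMBIOS table; real entries come from dmi_scan */
static struct memdev dmi_memdev[] = { {0x1100, 0x1a}, {0x1101, 0x1a} };
static int dmi_memdev_nr = 2;

static uint8_t dmi_memdev_type(uint16_t handle)
{
	for (int n = 0; n < dmi_memdev_nr; n++)
		if (handle == dmi_memdev[n].handle)
			return dmi_memdev[n].type;
	return 0x0;			/* not a valid memory type */
}

static uint16_t dmi_memdev_handle(int slot)
{
	if (slot >= 0 && slot < dmi_memdev_nr)
		return dmi_memdev[slot].handle;
	return 0xffff;			/* not a valid handle */
}

int main(void)
{
	for (int slot = 0; dmi_memdev_handle(slot) != 0xffff; slot++)
		printf("slot %d: type 0x%02x\n", slot,
		       dmi_memdev_type(dmi_memdev_handle(slot)));
	return 0;
}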
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index a43d2db5cbdb..4265e9dbed84 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -114,7 +114,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
dev_info(dev, "NXP i.MX DSP IPC initialized\n");
- return devm_of_platform_populate(dev);
+ return 0;
out:
kfree(chan_name);
for (j = 0; j < i; j++) {
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
index 687121f8c4d5..db655e87cdc8 100644
--- a/drivers/firmware/imx/imx-scu-irq.c
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -8,6 +8,7 @@
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/mailbox_client.h>
#define IMX_SC_IRQ_FUNC_ENABLE 1
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 04a24a863d6e..03b43b7a6d1d 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -107,6 +107,12 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
struct imx_sc_rpc_msg *hdr;
u32 *data = msg;
+ if (!sc_ipc->msg) {
+ dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
+ sc_chan->idx, *data);
+ return;
+ }
+
if (sc_chan->idx == 0) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
@@ -156,6 +162,7 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
*/
int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
{
+ uint8_t saved_svc, saved_func;
struct imx_sc_rpc_msg *hdr;
int ret;
@@ -165,7 +172,11 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
mutex_lock(&sc_ipc->lock);
reinit_completion(&sc_ipc->done);
- sc_ipc->msg = msg;
+ if (have_resp) {
+ sc_ipc->msg = msg;
+ saved_svc = ((struct imx_sc_rpc_msg *)msg)->svc;
+ saved_func = ((struct imx_sc_rpc_msg *)msg)->func;
+ }
sc_ipc->count = 0;
ret = imx_scu_ipc_write(sc_ipc, msg);
if (ret < 0) {
@@ -184,9 +195,20 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
/* response status is stored in hdr->func field */
hdr = msg;
ret = hdr->func;
+ /*
+ * Some special SCU firmware APIs do NOT have a return value in
+ * hdr->func, but they do have response data. Those special APIs
+ * are defined as void functions in the SCU firmware, so they
+ * should always be treated as successful.
+ */
+ if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
+ (saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
+ saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
+ ret = 0;
}
out:
+ sc_ipc->msg = NULL;
mutex_unlock(&sc_ipc->lock);
dev_dbg(sc_ipc->dev, "RPC SVC done\n");
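The saved_svc/saved_func dance exists because the RPC reuses the caller's buffer for the response: by the time the reply arrives, hdr->svc and hdr->func have been overwritten, so anything needed to classify the call must be captured beforehand. A standalone sketch of the in-place clobbering (the svc/func codes are made up):

#include <stdio.h>
#include <stdint.h>

struct rpc_msg { uint8_t ver, size, svc, func; };

/* Pretend the firmware reply overwrites the request in place */
static void fake_rpc(struct rpc_msg *msg)
{
	msg->func = 0;		/* response status lands in ->func */
}

int main(void)
{
	struct rpc_msg msg = { 1, 1, 0x07, 0x13 };	/* hypothetical svc/func */

	/* Save the identifying fields before they are clobbered */
	uint8_t saved_svc = msg.svc, saved_func = msg.func;

	fake_rpc(&msg);

	printf("call was svc=0x%02x func=0x%02x, status=%d\n",
	       saved_svc, saved_func, msg.func);
	return 0;
}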
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 8d908a8e0d20..1d5b4d74f96d 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -35,7 +35,7 @@ struct meson_sm_chip {
struct meson_sm_cmd cmd[];
};
-struct meson_sm_chip gxbb_chip = {
+static const struct meson_sm_chip gxbb_chip = {
.shmem_size = SZ_4K,
.cmd_shmem_in_base = 0x82000020,
.cmd_shmem_out_base = 0x82000021,
@@ -54,8 +54,6 @@ struct meson_sm_firmware {
void __iomem *sm_shmem_out_base;
};
-static struct meson_sm_firmware fw;
-
static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
unsigned int cmd_index)
{
@@ -90,6 +88,7 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
/**
* meson_sm_call - generic SMC32 call to the secure-monitor
*
+ * @fw: Pointer to secure-monitor firmware
* @cmd_index: Index of the SMC32 function ID
* @ret: Returned value
* @arg0: SMC32 Argument 0
@@ -100,15 +99,15 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
*
* Return: 0 on success, a negative value on error
*/
-int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0,
- u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
+ u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 cmd, lret;
- if (!fw.chip)
+ if (!fw->chip)
return -ENOENT;
- cmd = meson_sm_get_cmd(fw.chip, cmd_index);
+ cmd = meson_sm_get_cmd(fw->chip, cmd_index);
if (!cmd)
return -EINVAL;
@@ -124,6 +123,7 @@ EXPORT_SYMBOL(meson_sm_call);
/**
* meson_sm_call_read - retrieve data from secure-monitor
*
+ * @fw: Pointer to secure-monitor firmware
* @buffer: Buffer to store the retrieved data
* @bsize: Size of the buffer
* @cmd_index: Index of the SMC32 function ID
@@ -137,22 +137,23 @@ EXPORT_SYMBOL(meson_sm_call);
* When 0 is returned there is no guarantee about the amount of
* data read and bsize bytes are copied in buffer.
*/
-int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int bsize, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 size;
int ret;
- if (!fw.chip)
+ if (!fw->chip)
return -ENOENT;
- if (!fw.chip->cmd_shmem_out_base)
+ if (!fw->chip->cmd_shmem_out_base)
return -EINVAL;
- if (bsize > fw.chip->shmem_size)
+ if (bsize > fw->chip->shmem_size)
return -EINVAL;
- if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+ if (meson_sm_call(fw, cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
if (size > bsize)
@@ -164,7 +165,7 @@ int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
size = bsize;
if (buffer)
- memcpy(buffer, fw.sm_shmem_out_base, size);
+ memcpy(buffer, fw->sm_shmem_out_base, size);
return ret;
}
@@ -173,6 +174,7 @@ EXPORT_SYMBOL(meson_sm_call_read);
/**
* meson_sm_call_write - send data to secure-monitor
*
+ * @fw: Pointer to secure-monitor firmware
* @buffer: Buffer containing data to send
* @size: Size of the data to send
* @cmd_index: Index of the SMC32 function ID
@@ -184,23 +186,24 @@ EXPORT_SYMBOL(meson_sm_call_read);
*
* Return: size of sent data on success, a negative value on error
*/
-int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int size, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 written;
- if (!fw.chip)
+ if (!fw->chip)
return -ENOENT;
- if (size > fw.chip->shmem_size)
+ if (size > fw->chip->shmem_size)
return -EINVAL;
- if (!fw.chip->cmd_shmem_in_base)
+ if (!fw->chip->cmd_shmem_in_base)
return -EINVAL;
- memcpy(fw.sm_shmem_in_base, buffer, size);
+ memcpy(fw->sm_shmem_in_base, buffer, size);
- if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+ if (meson_sm_call(fw, cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
if (!written)
@@ -210,6 +213,24 @@ int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
}
EXPORT_SYMBOL(meson_sm_call_write);
+/**
+ * meson_sm_get - get pointer to meson_sm_firmware structure.
+ *
+ * @sm_node: Pointer to the secure-monitor Device Tree node.
+ *
+ * Return: NULL if the secure-monitor device is not ready.
+ */
+struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
+{
+ struct platform_device *pdev = of_find_device_by_node(sm_node);
+
+ if (!pdev)
+ return NULL;
+
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(meson_sm_get);
+
#define SM_CHIP_ID_LENGTH 119
#define SM_CHIP_ID_OFFSET 4
#define SM_CHIP_ID_SIZE 12
@@ -217,33 +238,25 @@ EXPORT_SYMBOL(meson_sm_call_write);
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct meson_sm_firmware *fw;
uint8_t *id_buf;
int ret;
+ fw = platform_get_drvdata(pdev);
+
id_buf = kmalloc(SM_CHIP_ID_LENGTH, GFP_KERNEL);
if (!id_buf)
return -ENOMEM;
- ret = meson_sm_call_read(id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
+ ret = meson_sm_call_read(fw, id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
0, 0, 0, 0, 0);
if (ret < 0) {
kfree(id_buf);
return ret;
}
- ret = sprintf(buf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- id_buf[SM_CHIP_ID_OFFSET + 0],
- id_buf[SM_CHIP_ID_OFFSET + 1],
- id_buf[SM_CHIP_ID_OFFSET + 2],
- id_buf[SM_CHIP_ID_OFFSET + 3],
- id_buf[SM_CHIP_ID_OFFSET + 4],
- id_buf[SM_CHIP_ID_OFFSET + 5],
- id_buf[SM_CHIP_ID_OFFSET + 6],
- id_buf[SM_CHIP_ID_OFFSET + 7],
- id_buf[SM_CHIP_ID_OFFSET + 8],
- id_buf[SM_CHIP_ID_OFFSET + 9],
- id_buf[SM_CHIP_ID_OFFSET + 10],
- id_buf[SM_CHIP_ID_OFFSET + 11]);
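+	/* %12phN prints the 12 ID bytes as bare hex, replacing the open-coded format list */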
+ ret = sprintf(buf, "%12phN\n", &id_buf[SM_CHIP_ID_OFFSET]);
kfree(id_buf);
@@ -268,25 +281,34 @@ static const struct of_device_id meson_sm_ids[] = {
static int __init meson_sm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct meson_sm_chip *chip;
+ struct meson_sm_firmware *fw;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
- chip = of_match_device(meson_sm_ids, &pdev->dev)->data;
+ chip = of_match_device(meson_sm_ids, dev)->data;
if (chip->cmd_shmem_in_base) {
- fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
- chip->shmem_size);
- if (WARN_ON(!fw.sm_shmem_in_base))
+ fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw->sm_shmem_in_base))
goto out;
}
if (chip->cmd_shmem_out_base) {
- fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
- chip->shmem_size);
- if (WARN_ON(!fw.sm_shmem_out_base))
+ fw->sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw->sm_shmem_out_base))
goto out_in_base;
}
- fw.chip = chip;
+ fw->chip = chip;
+
+ platform_set_drvdata(pdev, fw);
+
pr_info("secure-monitor enabled\n");
if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
@@ -295,7 +317,7 @@ static int __init meson_sm_probe(struct platform_device *pdev)
return 0;
out_in_base:
- iounmap(fw.sm_shmem_in_base);
+ iounmap(fw->sm_shmem_in_base);
out:
return -EINVAL;
}
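With the firmware state now carried in driver data instead of a file-scope singleton, a consumer resolves the secure-monitor node and obtains a handle through meson_sm_get() before making any calls. A minimal sketch, assuming the consumer references the monitor through a "secure-monitor" phandle (the property name is an assumption here):

#include <linux/of.h>
#include <linux/firmware/meson/meson_sm.h>

static int example_read_chip_id(struct device *dev, void *buf, size_t len)
{
	struct device_node *sm_np;
	struct meson_sm_firmware *fw;

	/* "secure-monitor" phandle property is assumed, not mandated here */
	sm_np = of_parse_phandle(dev->of_node, "secure-monitor", 0);
	if (!sm_np)
		return -ENODEV;

	fw = meson_sm_get(sm_np);
	of_node_put(sm_np);
	if (!fw)
		return -EPROBE_DEFER;	/* monitor not probed yet */

	return meson_sm_call_read(fw, buf, len, SM_GET_CHIP_ID,
				  0, 0, 0, 0, 0);
}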
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 215061c581e1..48e2ef794ea3 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -442,6 +442,41 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
+int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
+ u32 mode)
+{
+ struct ocmem_tz_lock {
+ __le32 id;
+ __le32 offset;
+ __le32 size;
+ __le32 mode;
+ } request;
+
+ request.id = cpu_to_le32(id);
+ request.offset = cpu_to_le32(offset);
+ request.size = cpu_to_le32(size);
+ request.mode = cpu_to_le32(mode);
+
+ return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD,
+ &request, sizeof(request), NULL, 0);
+}
+
+int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
+{
+ struct ocmem_tz_unlock {
+ __le32 id;
+ __le32 offset;
+ __le32 size;
+ } request;
+
+ request.id = cpu_to_le32(id);
+ request.offset = cpu_to_le32(offset);
+ request.size = cpu_to_le32(size);
+
+ return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD,
+ &request, sizeof(request), NULL, 0);
+}
+
void __qcom_scm_init(void)
{
}
@@ -582,7 +617,22 @@ int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare)
{
- return -ENODEV;
+ struct msm_scm_sec_cfg {
+ __le32 id;
+ __le32 ctx_bank_num;
+ } cfg;
+ int ret, scm_ret = 0;
+
+ cfg.id = cpu_to_le32(device_id);
+ cfg.ctx_bank_num = cpu_to_le32(spare);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
+ &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret));
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
}
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
@@ -614,3 +664,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 91d5ad7cf58b..3c5850350974 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
-*/
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, u32 type)
+{
+ u64 cmd;
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5,
+ quirk.state.a6, 0, res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ cmd = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __qcom_scm_call_do(desc, res, fn_id, x5,
+ ARM_SMCCC_STD_CALL);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int retry_count = 0, i;
+ int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
- u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
- struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
@@ -117,46 +157,56 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys;
}
- do {
- mutex_lock(&qcom_scm_lock);
-
- cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- qcom_smccc_convention,
- ARM_SMCCC_OWNER_SIP, fn_id);
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5,
- quirk.state.a6, 0, res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- cmd = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
+ qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
- if (res->a0 < 0)
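+	/* res->a0 is unsigned, so compare as signed to catch negative SCM error codes */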
+ if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0);
return 0;
}
/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ might_sleep();
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
+}
+
+/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
@@ -241,6 +291,18 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
return ret;
}
+int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset,
+ uint32_t size, uint32_t mode)
+{
+ return -ENOTSUPP;
+}
+
+int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset,
+ uint32_t size)
+{
+ return -ENOTSUPP;
+}
+
void __qcom_scm_init(void)
{
u64 cmd;
@@ -502,3 +564,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
+ desc.args[1] = en;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
+ QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
+}
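The split matters because the original qcom_scm_call() path takes qcom_scm_lock and may msleep() on EBUSY, while qcom_scm_call_atomic() issues a FAST call with GFP_ATOMIC allocations and no locking. That is what lets the new SMMU erratum toggle be driven from non-sleeping contexts. An illustrative (hypothetical) caller:

#include <linux/qcom_scm.h>
#include <linux/spinlock.h>

/* Hypothetical spinlocked path; the struct and lock are illustrative. */
struct example_smmu {
	spinlock_t lock;
};

static void example_tlb_sync(struct example_smmu *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->lock, flags);
	/* safe: routed to qcom_scm_call_atomic(), which never sleeps */
	qcom_scm_qsmmu500_wait_safe_toggle(true);
	/* ... issue and wait for the TLB sync here ... */
	qcom_scm_qsmmu500_wait_safe_toggle(false);
	spin_unlock_irqrestore(&smmu->lock, flags);
}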
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 4802ab170fe5..1ba0df4b97ab 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -192,6 +192,46 @@ bool qcom_scm_pas_supported(u32 peripheral)
EXPORT_SYMBOL(qcom_scm_pas_supported);
/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
+ QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ * @mode: access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode)
+{
+ return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+ return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
+/**
* qcom_scm_pas_init_image() - Initialize peripheral authentication service
* state machine for a given peripheral, using the
* metadata
@@ -327,6 +367,19 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
.deassert = qcom_scm_pas_reset_deassert,
};
+/**
+ * qcom_scm_restore_sec_cfg_available() - Check if secure environment
+ * supports restore security config interface.
+ *
+ * Return true if restore-cfg interface is supported, false if not.
+ */
+bool qcom_scm_restore_sec_cfg_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_RESTORE_SEC_CFG);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
+
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
@@ -345,6 +398,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl(__scm->dev, addr, val);
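Taken together, these wrappers let an OCMEM driver probe for the interface and then bracket its use of a region. A sketch under assumptions (the initiator ID constant, offset, size, and mode value below are all illustrative):

#include <linux/qcom_scm.h>
#include <linux/sizes.h>

static int example_assign_gfx_region(void)
{
	int ret;

	if (!qcom_scm_ocmem_lock_available())
		return -ENODEV;

	/* initiator ID is an assumed enum value; mode 0 stands in for WIDE */
	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_128K, 0);
	if (ret)
		return ret;

	/* ... hand the region to the GPU ... */

	return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_128K);
}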
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 99506bd873c0..81dcf5f1138e 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -42,6 +42,15 @@ extern int __qcom_scm_hdcp_req(struct device *dev,
extern void __qcom_scm_init(void);
+#define QCOM_SCM_OCMEM_SVC 0xf
+#define QCOM_SCM_OCMEM_LOCK_CMD 0x1
+#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x2
+
+extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset,
+ u32 size, u32 mode);
+extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset,
+ u32 size);
+
#define QCOM_SCM_SVC_PIL 0x2
#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
@@ -91,10 +100,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_CONFIG_ERRATA1 0x3
+#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
+ bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz,
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 19c56133234b..6741fcda0c37 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -804,7 +804,7 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
}
static const struct dev_pm_ops tegra_bpmp_pm_ops = {
- .resume_early = tegra_bpmp_resume,
+ .resume_noirq = tegra_bpmp_resume,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index fd3d83745208..75bdfaa08380 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -711,8 +711,11 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
int ret;
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
- if (!np)
- return 0;
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (!np)
+ return 0;
+ }
of_node_put(np);
ret = get_set_conduit_method(dev->of_node);
@@ -770,6 +773,7 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
static const struct of_device_id zynqmp_firmware_of_match[] = {
{.compatible = "xlnx,zynqmp-firmware"},
+ {.compatible = "xlnx,versal-firmware"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 400c09b905f8..1f7d9bbec0fc 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -178,46 +178,25 @@ static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(port_state & mask);
}
+static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
+
static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- size_t i;
- static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
- const unsigned int gpio_reg_size = 8;
- unsigned int bits_offset;
- size_t word_index;
- unsigned int word_offset;
- unsigned long word_mask;
- const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
+ unsigned long offset;
+ unsigned long gpio_mask;
+ unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
- /* get bits are evaluated a gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports); i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
-
- /* word index for bits array */
- word_index = BIT_WORD(bits_offset);
-
- /* gpio offset within current word of bits array */
- word_offset = bits_offset % BITS_PER_LONG;
-
- /* mask of get bits for current gpio within current word */
- word_mask = mask[word_index] & (port_mask << word_offset);
- if (!word_mask) {
- /* no get bits in this port so skip to next one */
- continue;
- }
-
- /* read bits from current gpio port */
- port_state = inb(dio48egpio->base + ports[i]);
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ port_addr = dio48egpio->base + ports[offset / 8];
+ port_state = inb(port_addr) & gpio_mask;
- /* store acquired bits at respective bits array offset */
- bits[word_index] |= (port_state << word_offset) & word_mask;
+ bitmap_set_value8(bits, port_state, offset);
}
return 0;
@@ -247,37 +226,27 @@ static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned int i;
- const unsigned int gpio_reg_size = 8;
- unsigned int port;
- unsigned int out_port;
- unsigned int bitmask;
+ unsigned long offset;
+ unsigned long gpio_mask;
+ size_t index;
+ unsigned int port_addr;
+ unsigned long bitmask;
unsigned long flags;
- /* set bits are evaluated a gpio register size at a time */
- for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
- /* no more set bits in this mask word; skip to the next word */
- if (!mask[BIT_WORD(i)]) {
- i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
- continue;
- }
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ index = offset / 8;
+ port_addr = dio48egpio->base + ports[index];
- port = i / gpio_reg_size;
- out_port = (port > 2) ? port + 1 : port;
- bitmask = mask[BIT_WORD(i)] & bits[BIT_WORD(i)];
+ bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
/* update output state data and set device gpio register */
- dio48egpio->out_state[port] &= ~mask[BIT_WORD(i)];
- dio48egpio->out_state[port] |= bitmask;
- outb(dio48egpio->out_state[port], dio48egpio->base + out_port);
+ dio48egpio->out_state[index] &= ~gpio_mask;
+ dio48egpio->out_state[index] |= bitmask;
+ outb(dio48egpio->out_state[index], port_addr);
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-
- /* prepare for next gpio register set */
- mask[BIT_WORD(i)] >>= gpio_reg_size;
- bits[BIT_WORD(i)] >>= gpio_reg_size;
}
}
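The rewrite above is the pattern repeated through the rest of this series: for_each_set_clump8() yields each 8-bit window of the mask that has any bit set (offset is always a multiple of 8), and bitmap_get_value8()/bitmap_set_value8() move one byte in or out of the bitmap at that offset. A standalone sketch of the semantics, with made-up values:

#include <linux/bitmap.h>
#include <linux/bitops.h>

static void clump8_demo(void)
{
	DECLARE_BITMAP(mask, 24);
	DECLARE_BITMAP(bits, 24);
	unsigned long offset;		/* clump position: 0, 8 or 16 */
	unsigned long gpio_mask;	/* the 8 mask bits at that offset */

	bitmap_zero(mask, 24);
	bitmap_set(mask, 2, 1);		/* selects the clump at offset 0 */
	bitmap_set(mask, 17, 2);	/* selects the clump at offset 16 */

	bitmap_zero(bits, 24);
	for_each_set_clump8(offset, gpio_mask, mask, 24) {
		/* stand-in for reading an 8-bit port register */
		unsigned long port_state = 0xa5 & gpio_mask;

		bitmap_set_value8(bits, port_state, offset);
	}
	/* offsets visited: 0 and 16; offset 8 is skipped (no mask bits) */
}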
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index c50329ab493a..d350ac0de06b 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -85,42 +85,20 @@ static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- size_t i;
+ unsigned long offset;
+ unsigned long gpio_mask;
static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
- const unsigned int gpio_reg_size = 8;
- unsigned int bits_offset;
- size_t word_index;
- unsigned int word_offset;
- unsigned long word_mask;
- const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
+ unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
- /* get bits are evaluated a gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports); i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ port_addr = idi48gpio->base + ports[offset / 8];
+ port_state = inb(port_addr) & gpio_mask;
- /* word index for bits array */
- word_index = BIT_WORD(bits_offset);
-
- /* gpio offset within current word of bits array */
- word_offset = bits_offset % BITS_PER_LONG;
-
- /* mask of get bits for current gpio within current word */
- word_mask = mask[word_index] & (port_mask << word_offset);
- if (!word_mask) {
- /* no get bits in this port so skip to next one */
- continue;
- }
-
- /* read bits from current gpio port */
- port_state = inb(idi48gpio->base + ports[i]);
-
- /* store acquired bits at respective bits array offset */
- bits[word_index] |= (port_state << word_offset) & word_mask;
+ bitmap_set_value8(bits, port_state, offset);
}
return 0;
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e81307f9754e..05637d585152 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
*/
+#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
@@ -72,20 +73,18 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
- unsigned int i, idx, shift;
- u8 bank, bankmask;
+ unsigned long offset;
+ unsigned long bankmask;
+ size_t bank;
+ unsigned long bitmask;
mutex_lock(&chip->lock);
- for (i = 0, bank = chip->registers - 1; i < chip->registers;
- i++, bank--) {
- idx = i / sizeof(*mask);
- shift = i % sizeof(*mask) * BITS_PER_BYTE;
- bankmask = mask[idx] >> shift;
- if (!bankmask)
- continue;
+ for_each_set_clump8(offset, bankmask, mask, chip->registers * 8) {
+ bank = chip->registers - 1 - offset / 8;
+ bitmask = bitmap_get_value8(bits, offset) & bankmask;
chip->buffer[bank] &= ~bankmask;
- chip->buffer[bank] |= bankmask & (bits[idx] >> shift);
+ chip->buffer[bank] |= bitmask;
}
__gen_74x164_write_config(chip);
mutex_unlock(&chip->lock);
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index c22d6f94129c..b89b8c5ff1f5 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -167,46 +167,25 @@ static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(port_state & mask);
}
+static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
+
static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- size_t i;
- static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
- const unsigned int gpio_reg_size = 8;
- unsigned int bits_offset;
- size_t word_index;
- unsigned int word_offset;
- unsigned long word_mask;
- const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
+ unsigned long offset;
+ unsigned long gpio_mask;
+ unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
- /* get bits are evaluated a gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports); i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
-
- /* word index for bits array */
- word_index = BIT_WORD(bits_offset);
-
- /* gpio offset within current word of bits array */
- word_offset = bits_offset % BITS_PER_LONG;
-
- /* mask of get bits for current gpio within current word */
- word_mask = mask[word_index] & (port_mask << word_offset);
- if (!word_mask) {
- /* no get bits in this port so skip to next one */
- continue;
- }
-
- /* read bits from current gpio port */
- port_state = inb(gpiommgpio->base + ports[i]);
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ port_addr = gpiommgpio->base + ports[offset / 8];
+ port_state = inb(port_addr) & gpio_mask;
- /* store acquired bits at respective bits array offset */
- bits[word_index] |= (port_state << word_offset) & word_mask;
+ bitmap_set_value8(bits, port_state, offset);
}
return 0;
@@ -237,37 +216,27 @@ static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned int i;
- const unsigned int gpio_reg_size = 8;
- unsigned int port;
- unsigned int out_port;
- unsigned int bitmask;
+ unsigned long offset;
+ unsigned long gpio_mask;
+ size_t index;
+ unsigned int port_addr;
+ unsigned long bitmask;
unsigned long flags;
- /* set bits are evaluated a gpio register size at a time */
- for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
- /* no more set bits in this mask word; skip to the next word */
- if (!mask[BIT_WORD(i)]) {
- i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
- continue;
- }
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ index = offset / 8;
+ port_addr = gpiommgpio->base + ports[index];
- port = i / gpio_reg_size;
- out_port = (port > 2) ? port + 1 : port;
- bitmask = mask[BIT_WORD(i)] & bits[BIT_WORD(i)];
+ bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
spin_lock_irqsave(&gpiommgpio->lock, flags);
/* update output state data and set device gpio register */
- gpiommgpio->out_state[port] &= ~mask[BIT_WORD(i)];
- gpiommgpio->out_state[port] |= bitmask;
- outb(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
+ gpiommgpio->out_state[index] &= ~gpio_mask;
+ gpiommgpio->out_state[index] |= bitmask;
+ outb(gpiommgpio->out_state[index], port_addr);
spin_unlock_irqrestore(&gpiommgpio->lock, flags);
-
- /* prepare for next gpio register set */
- mask[BIT_WORD(i)] >>= gpio_reg_size;
- bits[BIT_WORD(i)] >>= gpio_reg_size;
}
}
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 0696d5a21431..310d1a248cae 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -31,6 +31,7 @@
*/
#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/crc8.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -232,16 +233,20 @@ static int max3191x_get_multiple(struct gpio_chip *gpio, unsigned long *mask,
unsigned long *bits)
{
struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
- int ret, bit = 0, wordlen = max3191x_wordlen(max3191x);
+ const unsigned int wordlen = max3191x_wordlen(max3191x);
+ int ret;
+ unsigned long bit;
+ unsigned long gpio_mask;
+ unsigned long in;
mutex_lock(&max3191x->lock);
ret = max3191x_readout_locked(max3191x);
if (ret)
goto out_unlock;
- while ((bit = find_next_bit(mask, gpio->ngpio, bit)) != gpio->ngpio) {
+ bitmap_zero(bits, gpio->ngpio);
+ for_each_set_clump8(bit, gpio_mask, mask, gpio->ngpio) {
unsigned int chipnum = bit / MAX3191X_NGPIO;
- unsigned long in, shift, index;
if (max3191x_chip_is_faulting(max3191x, chipnum)) {
ret = -EIO;
@@ -249,12 +254,8 @@ static int max3191x_get_multiple(struct gpio_chip *gpio, unsigned long *mask,
}
in = ((u8 *)max3191x->xfer.rx_buf)[chipnum * wordlen];
- shift = round_down(bit % BITS_PER_LONG, MAX3191X_NGPIO);
- index = bit / BITS_PER_LONG;
- bits[index] &= ~(mask[index] & (0xff << shift));
- bits[index] |= mask[index] & (in << shift); /* copy bits */
-
- bit = (chipnum + 1) * MAX3191X_NGPIO; /* go to next chip */
+ in &= gpio_mask;
+ bitmap_set_value8(bits, in, bit);
}
out_unlock:
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 82122c3c688a..6652bee01966 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -9,7 +9,7 @@
*/
#include <linux/acpi.h>
-#include <linux/bits.h>
+#include <linux/bitmap.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -115,6 +115,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
#define MAX_BANK 5
#define BANK_SZ 8
+#define MAX_LINE (MAX_BANK * BANK_SZ)
#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
@@ -146,10 +147,10 @@ struct pca953x_chip {
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
- u8 irq_mask[MAX_BANK];
- u8 irq_stat[MAX_BANK];
- u8 irq_trig_raise[MAX_BANK];
- u8 irq_trig_fall[MAX_BANK];
+ DECLARE_BITMAP(irq_mask, MAX_LINE);
+ DECLARE_BITMAP(irq_stat, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
struct irq_chip irq_chip;
#endif
atomic_t wakeup_path;
@@ -333,12 +334,16 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
return regaddr;
}
-static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = pca953x_recalc_addr(chip, reg, 0, true, true);
- int ret;
+ u8 value[MAX_BANK];
+ int i, ret;
+
+ for (i = 0; i < NBANK(chip); i++)
+ value[i] = bitmap_get_value8(val, i * BANK_SZ);
- ret = regmap_bulk_write(chip->regmap, regaddr, val, NBANK(chip));
+ ret = regmap_bulk_write(chip->regmap, regaddr, value, NBANK(chip));
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
return ret;
@@ -347,17 +352,21 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
return 0;
}
-static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
+static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = pca953x_recalc_addr(chip, reg, 0, false, true);
- int ret;
+ u8 value[MAX_BANK];
+ int i, ret;
- ret = regmap_bulk_read(chip->regmap, regaddr, val, NBANK(chip));
+ ret = regmap_bulk_read(chip->regmap, regaddr, value, NBANK(chip));
if (ret < 0) {
dev_err(&chip->client->dev, "failed reading register\n");
return ret;
}
+ for (i = 0; i < NBANK(chip); i++)
+ bitmap_set_value8(val, value[i], i * BANK_SZ);
+
return 0;
}
@@ -412,7 +421,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
ret = regmap_read(chip->regmap, inreg, &reg_val);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
- /* NOTE: diagnostic already emitted; that's all we should
+ /*
+ * NOTE:
+ * diagnostic already emitted; that's all we should
* do unless gpio_*_value_cansleep() calls become different
* from their nonsleeping siblings (and report faults).
*/
@@ -459,9 +470,7 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- unsigned int bank_mask, bank_val;
- int bank;
- u8 reg_val[MAX_BANK];
+ DECLARE_BITMAP(reg_val, MAX_LINE);
int ret;
mutex_lock(&chip->i2c_lock);
@@ -469,16 +478,7 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
if (ret)
goto exit;
- for (bank = 0; bank < NBANK(chip); bank++) {
- bank_mask = mask[bank / sizeof(*mask)] >>
- ((bank % sizeof(*mask)) * 8);
- if (bank_mask) {
- bank_val = bits[bank / sizeof(*bits)] >>
- ((bank % sizeof(*bits)) * 8);
- bank_val &= bank_mask;
- reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
- }
- }
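+	/* per-bit merge: reg_val = (reg_val & ~mask) | (bits & mask) across all banks */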
+ bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
pca953x_write_regs(chip, chip->regs->output, reg_val);
exit:
@@ -605,10 +605,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 new_irqs;
- int level, i;
- u8 invert_irq_mask[MAX_BANK];
- u8 reg_direction[MAX_BANK];
+ DECLARE_BITMAP(irq_mask, MAX_LINE);
+ DECLARE_BITMAP(reg_direction, MAX_LINE);
+ int level;
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
@@ -616,25 +615,18 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
- for (i = 0; i < NBANK(chip); i++)
- invert_irq_mask[i] = ~chip->irq_mask[i];
+ bitmap_complement(irq_mask, chip->irq_mask, gc->ngpio);
/* Unmask enabled interrupts */
- pca953x_write_regs(chip, PCAL953X_INT_MASK, invert_irq_mask);
+ pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
}
+ bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
+
/* Look for any newly setup interrupt */
- for (i = 0; i < NBANK(chip); i++) {
- new_irqs = chip->irq_trig_fall[i] | chip->irq_trig_raise[i];
- new_irqs &= reg_direction[i];
-
- while (new_irqs) {
- level = __ffs(new_irqs);
- pca953x_gpio_direction_input(&chip->gpio_chip,
- level + (BANK_SZ * i));
- new_irqs &= ~(1 << level);
- }
- }
+ for_each_set_bit(level, irq_mask, gc->ngpio)
+ pca953x_gpio_direction_input(&chip->gpio_chip, level);
mutex_unlock(&chip->irq_lock);
}
@@ -675,15 +667,15 @@ static void pca953x_irq_shutdown(struct irq_data *d)
chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
}
-static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
{
- u8 cur_stat[MAX_BANK];
- u8 old_stat[MAX_BANK];
- bool pending_seen = false;
- bool trigger_seen = false;
- u8 trigger[MAX_BANK];
- u8 reg_direction[MAX_BANK];
- int ret, i;
+ struct gpio_chip *gc = &chip->gpio_chip;
+ DECLARE_BITMAP(reg_direction, MAX_LINE);
+ DECLARE_BITMAP(old_stat, MAX_LINE);
+ DECLARE_BITMAP(cur_stat, MAX_LINE);
+ DECLARE_BITMAP(new_stat, MAX_LINE);
+ DECLARE_BITMAP(trigger, MAX_LINE);
+ int ret;
if (chip->driver_data & PCA_PCAL) {
/* Read the current interrupt status from the device */
@@ -692,20 +684,16 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
return false;
/* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, PCA953X_INPUT, cur_stat);
+ ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
- for (i = 0; i < NBANK(chip); i++) {
- /* Apply filter for rising/falling edge selection */
- pending[i] = (~cur_stat[i] & chip->irq_trig_fall[i]) |
- (cur_stat[i] & chip->irq_trig_raise[i]);
- pending[i] &= trigger[i];
- if (pending[i])
- pending_seen = true;
- }
+ /* Apply filter for rising/falling edge selection */
+ bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
+
+ bitmap_and(pending, new_stat, trigger, gc->ngpio);
- return pending_seen;
+ return !bitmap_empty(pending, gc->ngpio);
}
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
@@ -714,64 +702,49 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
/* Remove output pins from the equation */
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
- for (i = 0; i < NBANK(chip); i++)
- cur_stat[i] &= reg_direction[i];
- memcpy(old_stat, chip->irq_stat, NBANK(chip));
+ bitmap_copy(old_stat, chip->irq_stat, gc->ngpio);
- for (i = 0; i < NBANK(chip); i++) {
- trigger[i] = (cur_stat[i] ^ old_stat[i]) & chip->irq_mask[i];
- if (trigger[i])
- trigger_seen = true;
- }
+ bitmap_and(new_stat, cur_stat, reg_direction, gc->ngpio);
+ bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
+ bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
- if (!trigger_seen)
+ if (bitmap_empty(trigger, gc->ngpio))
return false;
- memcpy(chip->irq_stat, cur_stat, NBANK(chip));
+ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
- for (i = 0; i < NBANK(chip); i++) {
- pending[i] = (old_stat[i] & chip->irq_trig_fall[i]) |
- (cur_stat[i] & chip->irq_trig_raise[i]);
- pending[i] &= trigger[i];
- if (pending[i])
- pending_seen = true;
- }
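+	/* pending = trigger & ((old_stat & trig_fall) | (new_stat & trig_raise)) */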
+ bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
+ bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
+ bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
+ bitmap_and(pending, new_stat, trigger, gc->ngpio);
- return pending_seen;
+ return !bitmap_empty(pending, gc->ngpio);
}
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
struct pca953x_chip *chip = devid;
- u8 pending[MAX_BANK];
- u8 level;
- unsigned nhandled = 0;
- int i;
+ struct gpio_chip *gc = &chip->gpio_chip;
+ DECLARE_BITMAP(pending, MAX_LINE);
+ int level;
if (!pca953x_irq_pending(chip, pending))
return IRQ_NONE;
- for (i = 0; i < NBANK(chip); i++) {
- while (pending[i]) {
- level = __ffs(pending[i]);
- handle_nested_irq(irq_find_mapping(chip->gpio_chip.irq.domain,
- level + (BANK_SZ * i)));
- pending[i] &= ~(1 << level);
- nhandled++;
- }
- }
+ for_each_set_bit(level, pending, gc->ngpio)
+ handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
- return (nhandled > 0) ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_HANDLED;
}
-static int pca953x_irq_setup(struct pca953x_chip *chip,
- int irq_base)
+static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
{
struct i2c_client *client = chip->client;
struct irq_chip *irq_chip = &chip->irq_chip;
- u8 reg_direction[MAX_BANK];
- int ret, i;
+ DECLARE_BITMAP(reg_direction, MAX_LINE);
+ DECLARE_BITMAP(irq_stat, MAX_LINE);
+ int ret;
if (!client->irq)
return 0;
@@ -782,7 +755,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (!(chip->driver_data & PCA_INT))
return 0;
- ret = pca953x_read_regs(chip, chip->regs->input, chip->irq_stat);
+ ret = pca953x_read_regs(chip, chip->regs->input, irq_stat);
if (ret)
return ret;
@@ -792,8 +765,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* this purpose.
*/
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
- for (i = 0; i < NBANK(chip); i++)
- chip->irq_stat[i] &= reg_direction[i];
+ bitmap_and(chip->irq_stat, irq_stat, reg_direction, chip->gpio_chip.ngpio);
mutex_init(&chip->irq_lock);
ret = devm_request_threaded_irq(&client->dev, client->irq,
@@ -816,9 +788,9 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
irq_chip->irq_set_type = pca953x_irq_set_type;
irq_chip->irq_shutdown = pca953x_irq_shutdown;
- ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, irq_chip,
- irq_base, handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, irq_chip,
+ irq_base, handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&client->dev,
"could not connect irqchip to gpiochip\n");
@@ -845,8 +817,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
+ DECLARE_BITMAP(val, MAX_LINE);
int ret;
- u8 val[MAX_BANK];
ret = regcache_sync_region(chip->regmap, chip->regs->output,
chip->regs->output + NBANK(chip));
@@ -860,9 +832,9 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
/* set platform specific polarity inversion */
if (invert)
- memset(val, 0xFF, NBANK(chip));
+ bitmap_fill(val, MAX_LINE);
else
- memset(val, 0, NBANK(chip));
+ bitmap_zero(val, MAX_LINE);
ret = pca953x_write_regs(chip, chip->regs->invert, val);
out:
@@ -871,8 +843,8 @@ out:
static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
{
+ DECLARE_BITMAP(val, MAX_LINE);
int ret;
- u8 val[MAX_BANK];
ret = device_pca95xx_init(chip, invert);
if (ret)
@@ -892,7 +864,7 @@ out:
static const struct of_device_id pca953x_dt_ids[];
static int pca953x_probe(struct i2c_client *client,
- const struct i2c_device_id *i2c_id)
+ const struct i2c_device_id *i2c_id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
@@ -901,8 +873,7 @@ static int pca953x_probe(struct i2c_client *client,
u32 invert = 0;
struct regulator *reg;
- chip = devm_kzalloc(&client->dev,
- sizeof(struct pca953x_chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
@@ -1016,7 +987,7 @@ static int pca953x_probe(struct i2c_client *client,
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
- chip->gpio_chip.ngpio, pdata->context);
+ chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
dev_warn(&client->dev, "setup failed, %d\n", ret);
}
@@ -1036,7 +1007,7 @@ static int pca953x_remove(struct i2c_client *client)
if (pdata && pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
- chip->gpio_chip.ngpio, pdata->context);
+ chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
dev_err(&client->dev, "teardown failed, %d\n", ret);
} else {
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index df51dd08bdfe..638d6656ce73 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -100,45 +100,23 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- size_t i;
- const unsigned int gpio_reg_size = 8;
- unsigned int bits_offset;
- size_t word_index;
- unsigned int word_offset;
- unsigned long word_mask;
- const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
- unsigned long port_state;
+ unsigned long offset;
+ unsigned long gpio_mask;
void __iomem *ports[] = {
&idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
&idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15,
};
+ void __iomem *port_addr;
+ unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
- /* get bits are evaluated a gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports); i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
-
- /* word index for bits array */
- word_index = BIT_WORD(bits_offset);
-
- /* gpio offset within current word of bits array */
- word_offset = bits_offset % BITS_PER_LONG;
-
- /* mask of get bits for current gpio within current word */
- word_mask = mask[word_index] & (port_mask << word_offset);
- if (!word_mask) {
- /* no get bits in this port so skip to next one */
- continue;
- }
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ port_addr = ports[offset / 8];
+ port_state = ioread8(port_addr) & gpio_mask;
- /* read bits from current gpio port */
- port_state = ioread8(ports[i]);
-
- /* store acquired bits at respective bits array offset */
- bits[word_index] |= (port_state << word_offset) & word_mask;
+ bitmap_set_value8(bits, port_state, offset);
}
return 0;
@@ -178,30 +156,31 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
+ unsigned long offset;
+ unsigned long gpio_mask;
+ void __iomem *ports[] = {
+ &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
+ };
+ size_t index;
+ void __iomem *port_addr;
+ unsigned long bitmask;
unsigned long flags;
- unsigned int out_state;
+ unsigned long out_state;
- raw_spin_lock_irqsave(&idio16gpio->lock, flags);
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ index = offset / 8;
+ port_addr = ports[index];
- /* process output lines 0-7 */
- if (*mask & 0xFF) {
- out_state = ioread8(&idio16gpio->reg->out0_7) & ~*mask;
- out_state |= *mask & *bits;
- iowrite8(out_state, &idio16gpio->reg->out0_7);
- }
+ bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
+
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- /* shift to next output line word */
- *mask >>= 8;
+ out_state = ioread8(port_addr) & ~gpio_mask;
+ out_state |= bitmask;
+ iowrite8(out_state, port_addr);
- /* process output lines 8-15 */
- if (*mask & 0xFF) {
- *bits >>= 8;
- out_state = ioread8(&idio16gpio->reg->out8_15) & ~*mask;
- out_state |= *mask & *bits;
- iowrite8(out_state, &idio16gpio->reg->out8_15);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
-
- raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
static void idio_16_irq_ack(struct irq_data *data)
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 44c1e4fc489f..1d475794a50f 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -201,52 +201,34 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- size_t i;
- const unsigned int gpio_reg_size = 8;
- unsigned int bits_offset;
- size_t word_index;
- unsigned int word_offset;
- unsigned long word_mask;
- const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
- unsigned long port_state;
+ unsigned long offset;
+ unsigned long gpio_mask;
void __iomem *ports[] = {
&idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
&idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7,
&idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23,
};
+ size_t index;
+ unsigned long port_state;
const unsigned long out_mode_mask = BIT(1);
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
- /* get bits are evaluated a gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
-
- /* word index for bits array */
- word_index = BIT_WORD(bits_offset);
-
- /* gpio offset within current word of bits array */
- word_offset = bits_offset % BITS_PER_LONG;
-
- /* mask of get bits for current gpio within current word */
- word_mask = mask[word_index] & (port_mask << word_offset);
- if (!word_mask) {
- /* no get bits in this port so skip to next one */
- continue;
- }
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ index = offset / 8;
/* read bits from current gpio port (port 6 is TTL GPIO) */
- if (i < 6)
- port_state = ioread8(ports[i]);
+ if (index < 6)
+ port_state = ioread8(ports[index]);
else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
port_state = ioread8(&idio24gpio->reg->ttl_out0_7);
else
port_state = ioread8(&idio24gpio->reg->ttl_in0_7);
- /* store acquired bits at respective bits array offset */
- bits[word_index] |= (port_state << word_offset) & word_mask;
+ port_state &= gpio_mask;
+
+ bitmap_set_value8(bits, port_state, offset);
}
return 0;
@@ -297,59 +279,48 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- size_t i;
- unsigned long bits_offset;
+ unsigned long offset;
unsigned long gpio_mask;
- const unsigned int gpio_reg_size = 8;
- const unsigned long port_mask = GENMASK(gpio_reg_size, 0);
- unsigned long flags;
- unsigned int out_state;
void __iomem *ports[] = {
&idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
&idio24gpio->reg->out16_23
};
+ size_t index;
+ unsigned long bitmask;
+ unsigned long flags;
+ unsigned long out_state;
const unsigned long out_mode_mask = BIT(1);
- const unsigned int ttl_offset = 48;
- const size_t ttl_i = BIT_WORD(ttl_offset);
- const unsigned int word_offset = ttl_offset % BITS_PER_LONG;
- const unsigned long ttl_mask = (mask[ttl_i] >> word_offset) & port_mask;
- const unsigned long ttl_bits = (bits[ttl_i] >> word_offset) & ttl_mask;
-
- /* set bits are processed a gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports); i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
-
- /* check if any set bits for current port */
- gpio_mask = (*mask >> bits_offset) & port_mask;
- if (!gpio_mask) {
- /* no set bits for this port so move on to next port */
- continue;
- }
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
+ index = offset / 8;
- /* process output lines */
- out_state = ioread8(ports[i]) & ~gpio_mask;
- out_state |= (*bits >> bits_offset) & gpio_mask;
- iowrite8(out_state, ports[i]);
+ bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
- }
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- /* check if setting TTL lines and if they are in output mode */
- if (!ttl_mask || !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
- return;
+ /* read bits from current gpio port (port 6 is TTL GPIO) */
+ if (index < 6) {
+ out_state = ioread8(ports[index]);
+ } else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask) {
+ out_state = ioread8(&idio24gpio->reg->ttl_out0_7);
+ } else {
+ /* skip TTL GPIO if set for input */
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ continue;
+ }
- /* handle TTL output */
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ /* set requested bit states */
+ out_state &= ~gpio_mask;
+ out_state |= bitmask;
- /* process output lines */
- out_state = ioread8(&idio24gpio->reg->ttl_out0_7) & ~ttl_mask;
- out_state |= ttl_bits;
- iowrite8(out_state, &idio24gpio->reg->ttl_out0_7);
+ /* write bits for current gpio port (port 6 is TTL GPIO) */
+ if (index < 6)
+ iowrite8(out_state, ports[index]);
+ else
+ iowrite8(out_state, &idio24gpio->reg->ttl_out0_7);
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
}
static void idio_24_irq_ack(struct irq_data *data)
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index 1331b2a94679..6698feabaced 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -96,16 +96,16 @@ static int pisosr_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct pisosr_gpio *gpio = gpiochip_get_data(chip);
- unsigned int nbytes = DIV_ROUND_UP(chip->ngpio, 8);
- unsigned int i, j;
+ unsigned long offset;
+ unsigned long gpio_mask;
+ unsigned long buffer_state;
pisosr_gpio_refresh(gpio);
bitmap_zero(bits, chip->ngpio);
- for (i = 0; i < nbytes; i++) {
- j = i / sizeof(unsigned long);
- bits[j] |= ((unsigned long) gpio->buffer[i])
- << (8 * (i % sizeof(unsigned long)));
+ for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
+ buffer_state = gpio->buffer[offset / 8] & gpio_mask;
+ bitmap_set_value8(bits, buffer_state, offset);
}
return 0;
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index bd203e8fa58e..7ec97499b7f7 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -15,9 +15,6 @@
#include <linux/spinlock.h>
#include <dt-bindings/gpio/uniphier-gpio.h>
-#define UNIPHIER_GPIO_BANK_MASK \
- GENMASK((UNIPHIER_GPIO_LINES_PER_BANK) - 1, 0)
-
#define UNIPHIER_GPIO_IRQ_MAX_NUM 24
#define UNIPHIER_GPIO_PORT_DATA 0x0 /* data */
@@ -150,15 +147,11 @@ static void uniphier_gpio_set(struct gpio_chip *chip,
static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
- unsigned int bank, shift, bank_mask, bank_bits;
- int i;
+ unsigned long i, bank, bank_mask, bank_bits;
- for (i = 0; i < chip->ngpio; i += UNIPHIER_GPIO_LINES_PER_BANK) {
+ for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / UNIPHIER_GPIO_LINES_PER_BANK;
- shift = i % BITS_PER_LONG;
- bank_mask = (mask[BIT_WORD(i)] >> shift) &
- UNIPHIER_GPIO_BANK_MASK;
- bank_bits = bits[BIT_WORD(i)] >> shift;
+ bank_bits = bitmap_get_value8(bits, i);
uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
bank_mask, bank_bits);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index fe456bea81f6..cb510df2b014 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -129,42 +129,19 @@ static int ws16c48_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned int gpio_reg_size = 8;
- size_t i;
- const size_t num_ports = chip->ngpio / gpio_reg_size;
- unsigned int bits_offset;
- size_t word_index;
- unsigned int word_offset;
- unsigned long word_mask;
- const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
+ unsigned long offset;
+ unsigned long gpio_mask;
+ unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
- /* get bits are evaluated a gpio port register at a time */
- for (i = 0; i < num_ports; i++) {
- /* gpio offset in bits array */
- bits_offset = i * gpio_reg_size;
+ for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
+ port_addr = ws16c48gpio->base + offset / 8;
+ port_state = inb(port_addr) & gpio_mask;
- /* word index for bits array */
- word_index = BIT_WORD(bits_offset);
-
- /* gpio offset within current word of bits array */
- word_offset = bits_offset % BITS_PER_LONG;
-
- /* mask of get bits for current gpio within current word */
- word_mask = mask[word_index] & (port_mask << word_offset);
- if (!word_mask) {
- /* no get bits in this port so skip to next one */
- continue;
- }
-
- /* read bits from current gpio port */
- port_state = inb(ws16c48gpio->base + i);
-
- /* store acquired bits at respective bits array offset */
- bits[word_index] |= (port_state << word_offset) & word_mask;
+ bitmap_set_value8(bits, port_state, offset);
}
return 0;
@@ -198,39 +175,29 @@ static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- unsigned int i;
- const unsigned int gpio_reg_size = 8;
- unsigned int port;
- unsigned int iomask;
- unsigned int bitmask;
+ unsigned long offset;
+ unsigned long gpio_mask;
+ size_t index;
+ unsigned int port_addr;
+ unsigned long bitmask;
unsigned long flags;
- /* set bits are evaluated a gpio register size at a time */
- for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
- /* no more set bits in this mask word; skip to the next word */
- if (!mask[BIT_WORD(i)]) {
- i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
- continue;
- }
-
- port = i / gpio_reg_size;
+ for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
+ index = offset / 8;
+ port_addr = ws16c48gpio->base + index;
/* mask out GPIO configured for input */
- iomask = mask[BIT_WORD(i)] & ~ws16c48gpio->io_state[port];
- bitmask = iomask & bits[BIT_WORD(i)];
+ gpio_mask &= ~ws16c48gpio->io_state[index];
+ bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
/* update output state data and set device gpio register */
- ws16c48gpio->out_state[port] &= ~iomask;
- ws16c48gpio->out_state[port] |= bitmask;
- outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ ws16c48gpio->out_state[index] &= ~gpio_mask;
+ ws16c48gpio->out_state[index] |= bitmask;
+ outb(ws16c48gpio->out_state[index], port_addr);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
-
- /* prepare for next gpio register set */
- mask[BIT_WORD(i)] >>= gpio_reg_size;
- bits[BIT_WORD(i)] >>= gpio_reg_size;
}
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1168351267fd..bfdadc3667e0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -95,6 +95,7 @@ config DRM_KMS_FB_HELPER
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
+ depends on STACKTRACE_SUPPORT
select STACKDEPOT
depends on DRM_KMS_HELPER
depends on DEBUG_KERNEL
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7d35b5b66229..888209eb8cec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -105,11 +105,24 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
+/* Estimate page table size needed to represent a given memory size
+ *
+ * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
+ * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
+ * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
+ * for 2MB pages for TLB efficiency. However, small allocations and
+ * fragmented system memory still need some 4KB pages. We choose a
+ * compromise that should work in most cases without reserving too
+ * much memory for page tables unnecessarily (factor 16K, >> 14).
+ */
+#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
+
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
+ uint64_t reserved_for_pt =
+ ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
- uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
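The arithmetic in the new comment is easy to sanity-check in isolation. A small standalone sketch using only the three shift factors the comment names (>> 9 for pure 4KB pages, >> 18 for pure 2MB pages, >> 14 for the chosen compromise):

/*
 * Print the page-table overhead implied by each factor for a given
 * amount of managed memory.
 */
#include <stdio.h>
#include <stdint.h>

#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

int main(void)
{
        uint64_t mem = 16ULL << 30; /* 16 GiB of managed memory */

        printf("4KB pages only : %llu KiB\n",
               (unsigned long long)((mem >> 9) >> 10));
        printf("2MB pages only : %llu KiB\n",
               (unsigned long long)((mem >> 18) >> 10));
        printf("compromise     : %llu KiB\n",
               (unsigned long long)(ESTIMATE_PT_SIZE(mem) >> 10));
        return 0;
}

For 16 GiB the compromise reserves 1 MiB of page tables, versus 32 MiB for the all-4KB worst case and 64 KiB for the all-2MB best case.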
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 2770cba56a6b..44be3a45b25e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1487,8 +1487,8 @@ out:
return ret;
/* Start rlc autoload after psp received all the gfx firmware */

- if (psp->autoload_supported && ucode->ucode_id ==
- AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
+ AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 7de16c0c2f20..2a8e04895595 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -27,7 +27,8 @@
#include <linux/bits.h>
#include "smu_v11_0_i2c.h"
-#define EEPROM_I2C_TARGET_ADDR 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
+#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -83,7 +84,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
{
int ret = 0;
struct i2c_msg msg = {
- .addr = EEPROM_I2C_TARGET_ADDR,
+ .addr = 0,
.flags = 0,
.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
.buf = buff,
@@ -93,6 +94,8 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
*(uint16_t *)buff = EEPROM_HDR_START;
__encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE);
+ msg.addr = control->i2c_address;
+
ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
if (ret < 1)
DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
@@ -203,7 +206,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct i2c_msg msg = {
- .addr = EEPROM_I2C_TARGET_ADDR,
+ .addr = 0,
.flags = I2C_M_RD,
.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
.buf = buff,
@@ -213,10 +216,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
break;
case CHIP_ARCTURUS:
+ control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
break;
@@ -229,6 +234,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return ret;
}
+ msg.addr = control->i2c_address;
+
/* Read/Create table header from EEPROM address 0 */
ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
if (ret < 1) {
@@ -408,8 +415,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
* Update bits 16,17 of EEPROM address in I2C address by setting them
* to bits 1,2 of Device address byte
*/
- msg->addr = EEPROM_I2C_TARGET_ADDR |
- ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
+ msg->addr = control->i2c_address |
+ ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
msg->flags = write ? 0 : I2C_M_RD;
msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE;
msg->buf = buff;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 622269957c1b..ca78f812d436 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -50,6 +50,7 @@ struct amdgpu_ras_eeprom_control {
struct mutex tbl_mutex;
bool bus_locked;
uint32_t tbl_byte_sum;
+ uint16_t i2c_address; // 8-bit represented address
};
/*
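The record accessor above ORs bits 16-17 of the EEPROM byte offset into bits 1-2 of the device address. EEPROM_ADDR_MSB_MASK itself is not visible in this hunk, so the value below is an assumption derived from that comment; the sketch only illustrates the address composition.

/*
 * ASSUMPTION: EEPROM_ADDR_MSB_MASK covers bits 16-17 (0x30000), as the
 * patch comment implies. Only the address math is shown here.
 */
#include <stdio.h>
#include <stdint.h>

#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define ASSUMED_EEPROM_ADDR_MSB_MASK  0x30000 /* bits 16,17 */

int main(void)
{
        uint32_t next_addr = 0x1A000; /* example record offset, bit 16 set */
        uint16_t addr = EEPROM_I2C_TARGET_ADDR_VEGA20 |
                        ((next_addr & ASSUMED_EEPROM_ADDR_MSB_MASK) >> 15);

        /* bit 16 of the offset lands in bit 1: 0xA0 | 0x2 = 0xA2 */
        printf("i2c device address: 0x%02X\n", (unsigned)addr);
        return 0;
}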
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index c8793e6cc3c5..6373bfb47d55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
*/
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
u32 dws;
int r;
/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
return r;
}
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 2d64d270725d..1befdee9f0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1346,10 +1346,13 @@ static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
r = smu7_asic_baco_reset(adev);
- else
+ } else {
r = cik_asic_pci_config_reset(adev);
+ }
return r;
}
@@ -1441,7 +1444,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1476,12 +1478,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1491,14 +1488,17 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1522,15 +1522,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1543,26 +1551,45 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1577,15 +1604,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
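Beyond dropping the manual pci_pcie_cap() offset lookups (pcie_capability_read_word() and friends locate the capability internally, and pci_is_pcie() covers the early-out), the rewrite replaces the magic ~0xf and 1/2/3 literals with the named Target Link Speed constants. A standalone sketch of that read-modify-write, reusing the PCI_EXP_LNKCTL2_* values from include/uapi/linux/pci_regs.h:

/*
 * Clear the Target Link Speed field of Link Control 2, then set it
 * to the highest supported generation, mirroring the hunk above.
 */
#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_LNKCTL2_TLS        0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT  0x0001 /* gen1 */
#define PCI_EXP_LNKCTL2_TLS_5_0GT  0x0002 /* gen2 */
#define PCI_EXP_LNKCTL2_TLS_8_0GT  0x0003 /* gen3 */

int main(void)
{
        uint16_t lnkctl2 = 0x0002; /* pretend current value: gen2 */
        int supports_gen3 = 1;     /* e.g. CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 */

        lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; /* clear TLS field */
        lnkctl2 |= supports_gen3 ? PCI_EXP_LNKCTL2_TLS_8_0GT
                                 : PCI_EXP_LNKCTL2_TLS_5_0GT;

        printf("new LNKCTL2: 0x%04X\n", lnkctl2); /* 0x0003 for gen3 */
        return 0;
}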
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ca5f0e7ea1ac..f2c1b026397b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -690,59 +690,61 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
- rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
- version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
+ if (!amdgpu_sriov_vf(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
+ adev->gfx.rlc.clear_state_descriptor_offset =
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
+ adev->gfx.rlc.avail_scratch_ram_locations =
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
+ adev->gfx.rlc.reg_restore_list_size =
le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
+ adev->gfx.rlc.reg_list_format_start =
le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
+ adev->gfx.rlc.reg_list_format_separate_start =
le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
+ adev->gfx.rlc.starting_offsets_start =
le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
+ adev->gfx.rlc.reg_list_format_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
+ adev->gfx.rlc.reg_list_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
+ adev->gfx.rlc.register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
- goto out;
- }
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ err = -ENOMEM;
+ goto out;
+ }
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
+ if (adev->gfx.rlc.is_rlc_v2_1)
+ gfx_v10_0_init_rlc_ext_microcode(adev);
+ }
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -993,39 +995,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
- AMDGPU_GEM_DOMAIN_VRAM);
- if (!r)
- adev->gfx.rlc.clear_state_gpu_addr =
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- return r;
-}
-
-static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
-
- if (!adev->gfx.rlc.clear_state_obj)
- return;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- }
-}
-
static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1787,25 +1756,7 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->in_gpu_reset) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (r)
- return r;
-
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (!r) {
- adev->gfx.rlc.funcs->get_csb_buffer(adev,
- adev->gfx.rlc.cs_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- }
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- if (r)
- return r;
- }
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
@@ -1817,22 +1768,6 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
-{
- int i;
- int r;
-
- r = gfx_v10_0_init_csb(adev);
- if (r)
- return r;
-
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
-
- /* TODO: init power gating */
- return 0;
-}
-
void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
@@ -1925,21 +1860,16 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
- if (r)
- return r;
- r = gfx_v10_0_init_pg(adev);
+ r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
return r;
- /* enable RLC SRM */
- gfx_v10_0_rlc_enable_srm(adev);
+ gfx_v10_0_init_csb(adev);
+ if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
+ gfx_v10_0_rlc_enable_srm(adev);
} else {
adev->gfx.rlc.funcs->stop(adev);
@@ -1961,9 +1891,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- r = gfx_v10_0_init_pg(adev);
- if (r)
- return r;
+ gfx_v10_0_init_csb(adev);
adev->gfx.rlc.funcs->start(adev);
@@ -2825,7 +2753,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
/* Init gfx ring 0 for pipe 0 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
- mutex_unlock(&adev->srbm_mutex);
+
/* Set ring buffer size */
ring = &adev->gfx.gfx_ring[0];
rb_bufsz = order_base_2(ring->ring_size / 8);
@@ -2863,11 +2791,11 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
/* Init gfx ring 1 for pipe 1 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
- mutex_unlock(&adev->srbm_mutex);
ring = &adev->gfx.gfx_ring[1];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
@@ -2897,6 +2825,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
/* Switch to pipe 0 */
mutex_lock(&adev->srbm_mutex);
@@ -3775,10 +3704,6 @@ static int gfx_v10_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gfx_v10_0_csb_vram_pin(adev);
- if (r)
- return r;
-
if (!amdgpu_emu_mode)
gfx_v10_0_init_golden_registers(adev);
@@ -3861,12 +3786,11 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_gfx_disable_kcq(adev))
DRM_ERROR("KCQ disable failed\n");
if (amdgpu_sriov_vf(adev)) {
- pr_debug("For SRIOV client, shouldn't do anything.\n");
+ gfx_v10_0_cp_gfx_enable(adev, false);
return 0;
}
gfx_v10_0_cp_enable(adev, false);
gfx_v10_0_enable_gui_idle_interrupt(adev, false);
- gfx_v10_0_csb_vram_unpin(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 791ba398f007..d92e92e5d50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4554,6 +4554,8 @@ static int gfx_v7_0_hw_init(void *handle)
gfx_v7_0_constants_init(adev);
+ /* init CSB */
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* init rlc */
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ffbde9136372..983db77999e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
- AMDGPU_GEM_DOMAIN_VRAM);
- if (!r)
- adev->gfx.rlc.clear_state_gpu_addr =
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- return r;
-}
-
-static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
-
- if (!adev->gfx.rlc.clear_state_obj)
- return;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- }
-}
-
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -3917,6 +3884,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
{
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32(mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
@@ -4837,10 +4805,6 @@ static int gfx_v8_0_hw_init(void *handle)
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
- r = gfx_v8_0_csb_vram_pin(adev);
- if (r)
- return r;
-
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4958,8 +4922,6 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_err("rlc is busy, skip halt rlc\n");
amdgpu_gfx_rlc_exit_safe_mode(adev);
- gfx_v8_0_csb_vram_unpin(adev);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index faf2ffce5837..66328ffa395a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1695,39 +1695,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
- AMDGPU_GEM_DOMAIN_VRAM);
- if (!r)
- adev->gfx.rlc.clear_state_gpu_addr =
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- return r;
-}
-
-static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
-
- if (!adev->gfx.rlc.clear_state_obj)
- return;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- }
-}
-
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -2415,6 +2382,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
adev->gfx.rlc.clear_state_gpu_addr >> 32);
@@ -3706,10 +3674,6 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_constants_init(adev);
- r = gfx_v9_0_csb_vram_pin(adev);
- if (r)
- return r;
-
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3791,8 +3755,6 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
adev->gfx.rlc.funcs->stop(adev);
- gfx_v9_0_csb_vram_unpin(adev);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index 5e9ab8eb214a..c0ab71df0d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -33,16 +33,31 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
u32 max_region =
REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+ u32 max_num_physical_nodes = 0;
+ u32 max_physical_node_id = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ max_num_physical_nodes = 4;
+ max_physical_node_id = 3;
+ break;
+ case CHIP_ARCTURUS:
+ max_num_physical_nodes = 8;
+ max_physical_node_id = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
/* PF_MAX_REGION=0 means xgmi is disabled */
if (max_region) {
adev->gmc.xgmi.num_physical_nodes = max_region + 1;
- if (adev->gmc.xgmi.num_physical_nodes > 4)
+ if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
return -EINVAL;
adev->gmc.xgmi.physical_node_id =
REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
- if (adev->gmc.xgmi.physical_node_id > 3)
+ if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
return -EINVAL;
adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 321f8a997be8..232469507446 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -326,7 +326,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
if (!adev->mman.buffer_funcs_enabled ||
!adev->ib_pool_ready ||
- adev->in_gpu_reset) {
+ adev->in_gpu_reset ||
+ ring->sched.ready == false) {
gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
mutex_unlock(&adev->mman.gtt_window_lock);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 29024e64c886..f2d70a47a3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1644,7 +1644,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1679,12 +1678,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1693,14 +1687,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -1717,15 +1714,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
}
for (i = 0; i < 10; i++) {
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -1737,25 +1742,44 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
mdelay(100);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
+
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -1768,15 +1792,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 78e5cdc0c058..f1b171e30774 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -783,10 +783,13 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
- if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
r = smu7_asic_baco_reset(adev);
- else
+ } else {
r = vi_asic_pci_config_reset(adev);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index a1a35d4d594b..ba0e68057a89 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64)
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 55a520a63712..778f186b3a05 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -342,7 +342,8 @@ bool dm_pp_get_clock_levels_by_type(
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
- /* Error in pplib. Provide default values. */
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
return true;
}
} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 921a36668ced..ac8c18fadefc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1037,6 +1037,25 @@ void dcn20_pipe_control_lock(
if (pipe->plane_state != NULL)
flip_immediate = pipe->plane_state->flip_immediate;
+ if (flip_immediate && lock) {
+ const int TIMEOUT_FOR_FLIP_PENDING = 100000;
+ int i;
+
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp))
+ break;
+ udelay(1);
+ }
+
+ if (pipe->bottom_pipe != NULL) {
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp))
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* In flip immediate and pipe splitting case, we need to use GSL
* for synchronization. Only do setup on locking and on flip type change.
*/
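The added wait is a bounded busy-poll: spin on the flip-pending flag with a 1 us delay per iteration and fall through after TIMEOUT_FOR_FLIP_PENDING tries rather than blocking forever. A runnable userspace analog of the same pattern (the pending check here is a stand-in):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool flip_pending(int iter)
{
        return iter < 3; /* stand-in: pretend the flip clears after 3 us */
}

int main(void)
{
        const int TIMEOUT_FOR_FLIP_PENDING = 100000;
        int i;

        for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
                if (!flip_pending(i))
                        break; /* flip retired, safe to proceed */
                usleep(1);     /* udelay(1) in the kernel version */
        }

        puts(i < TIMEOUT_FOR_FLIP_PENDING ? "flip completed" : "timed out");
        return 0;
}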
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index bbd1c98564be..09793336d84f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -157,6 +157,74 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.xfc_fill_constant_bytes = 0,
};
+struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .num_dsc = 5,
+ .rob_buffer_size_kbytes = 168,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 4,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0,
+ .line_buffer_fixed_bpp = 0,
+ .dcc_supported = true,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 12,
+ .writeback_max_vscl_taps = 12,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 5,
+ .max_num_dpp = 5,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 8,
+ .max_vscl_ratio = 8,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 32, //
+ .dppclk_delay_subtotal = 77, //
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 8,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 87, //
+ .dcfclk_cstate_latency = 10, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .xfc_supported = true,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .ptoi_supported = 0
+};
+
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
/* Defaults that get patched on driver load from firmware. */
.clock_limits = {
@@ -854,6 +922,8 @@ static const struct resource_caps res_cap_nv14 = {
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
+ .num_vmid = 16,
+ .num_dsc = 5,
};
static const struct dc_debug_options debug_defaults_drv = {
@@ -3212,6 +3282,10 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
uint32_t hw_internal_rev)
{
+ /* NV14 */
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_ip;
+
/* NV12 and NV10 */
return &dcn2_0_ip;
}
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 40b546c75fc2..5ff7ccedfbed 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -2548,3 +2548,12 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
return ret;
}
+
+int smu_send_smc_msg(struct smu_context *smu,
+ enum smu_message_type msg)
+{
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu, msg, 0);
+ return ret;
+}
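With this helper, the parameterless message send is simply the parameterized path called with param 0, which is why the per-ASIC ->send_smc_msg callbacks are deleted throughout the rest of this patch. A trivial standalone sketch of the delegation:

#include <stdio.h>

struct ctx;

static int send_msg_with_param(struct ctx *c, int msg, unsigned int param)
{
        (void)c;
        printf("msg %d param %u\n", msg, param);
        return 0;
}

static int send_msg(struct ctx *c, int msg)
{
        return send_msg_with_param(c, msg, 0); /* delegate with param 0 */
}

int main(void)
{
        return send_msg(NULL, 42);
}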
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 58c7c4a3053e..ce3566ca3e24 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2130,7 +2130,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
- .send_smc_msg = smu_v11_0_send_msg,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
.read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 031e0c22fcc7..ac9758305ab3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -497,8 +497,8 @@ struct pptable_funcs {
int (*notify_memory_pool_location)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
- int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
- int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
+ int (*send_smc_msg_with_param)(struct smu_context *smu,
+ enum smu_message_type msg, uint32_t param);
int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 606149085683..719844257713 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -177,10 +177,9 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
-
int
-smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
uint32_t param);
int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 9b9f5df0911c..9d81d789c713 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -44,10 +44,9 @@ int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
int smu_v12_0_wait_for_response(struct smu_context *smu);
-int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
-
int
-smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v12_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
uint32_t param);
int smu_v12_0_check_fw_status(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index aaec884d63ed..4a14fd1f9fd5 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -2055,7 +2055,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
- .send_smc_msg = smu_v11_0_send_msg,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
.read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 04daf7e9fe05..977bdd962e98 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -697,7 +697,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.check_fw_version = smu_v12_0_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
.powergate_vcn = smu_v12_0_powergate_vcn,
- .send_smc_msg = smu_v12_0_send_msg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
.read_smc_arg = smu_v12_0_read_arg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 8bcda7871309..8872f8b2d502 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -75,8 +75,8 @@
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_send_smc_msg(smu, msg) \
- ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0)
+int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+
#define smu_send_smc_msg_with_param(smu, msg, param) \
((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
#define smu_read_smc_arg(smu, arg) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index fc9679ea2368..e4268a627eff 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -90,36 +90,11 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0, index = 0;
-
- index = smu_msg_get_index(smu, msg);
- if (index < 0)
- return index;
-
- smu_v11_0_wait_for_response(smu);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
- ret = smu_v11_0_wait_for_response(smu);
-
- if (ret)
- pr_err("failed send message: %10s (%d) response %#x\n",
- smu_get_message_name(smu, msg), index, ret);
-
- return ret;
-
-}
-
int
-smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
uint32_t param)
{
-
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 139dd737eaa5..094cfc46adac 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -77,33 +77,9 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0, index = 0;
-
- index = smu_msg_get_index(smu, msg);
- if (index < 0)
- return index;
-
- smu_v12_0_wait_for_response(smu);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
-
- ret = smu_v12_0_wait_for_response(smu);
-
- if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x\n", index,
- ret);
-
- return ret;
-
-}
-
int
-smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v12_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
uint32_t param)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 0b4892833808..60b9ff097142 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3231,7 +3231,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
- .send_smc_msg = smu_v11_0_send_msg,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
.read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ae5809a1f19a..273dd80fabf3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3176,9 +3176,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_mst_topology_put_port(port);
}
- for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
+ for (i = 0; i < mgr->max_payloads; /* do nothing */) {
+ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
+ i++;
continue;
+ }
DRM_DEBUG_KMS("removing payload %d\n", i);
for (j = i; j < mgr->max_payloads - 1; j++) {
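The loop rewrite matters because removing payload i shifts the remaining entries down into slot i; if i also advances, the shifted-in entry is skipped and consecutive DELETE_LOCAL payloads survive. A standalone sketch of the corrected pattern (array compaction with a conditional increment; the shrinking element count is a simplification of what the driver does):

#include <stdio.h>

int main(void)
{
        int state[6] = { 1, 2, 2, 1, 2, 1 }; /* 2 = DELETE_LOCAL */
        int n = 6, i, j;

        for (i = 0; i < n; /* advance only when nothing was removed */) {
                if (state[i] != 2) {
                        i++;
                        continue;
                }
                for (j = i; j < n - 1; j++)
                        state[j] = state[j + 1]; /* shift later entries down */
                n--; /* slot i now holds an unexamined entry: do not advance */
        }

        for (i = 0; i < n; i++)
                printf("%d ", state[i]); /* prints: 1 1 1 */
        printf("\n");
        return 0;
}

With the old unconditional i++, the back-to-back 2s in this example would leave one DELETE_LOCAL entry behind.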
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 892ce636ef72..6ee04803c362 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
struct drm_property_blob *blob;
int ret;
- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+ if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
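Checking length against INT_MAX minus the header size rejects oversized requests before sizeof(struct drm_property_blob) + length is ever computed, so the allocation size cannot wrap. A standalone sketch of the guard:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

struct blob_hdr { unsigned long length; /* header stand-in */ };

static int check_len(unsigned long length)
{
        /* reject before the addition can overflow or oversize */
        if (!length || length > INT_MAX - sizeof(struct blob_hdr))
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d\n", check_len(4096));                    /* 0 */
        printf("%d\n", check_len((unsigned long)INT_MAX));  /* -EINVAL */
        return 0;
}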
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 0b1f786a7ce9..438040ff0179 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -7,7 +7,6 @@ config DRM_I915_WERROR
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
- select HEADER_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 1799537a3228..c280b6ae38eb 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -25,7 +25,7 @@ config DRM_I915_HEARTBEAT_INTERVAL
config DRM_I915_PREEMPT_TIMEOUT
int "Preempt timeout (ms, jiffy granularity)"
- default 100 # milliseconds
+ default 640 # milliseconds
help
How long to wait (in milliseconds) for a preemption event to occur
when submitting a new context via execlists. If the current context
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0caef2592a7e..ed8c7ce62119 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1273,7 +1273,9 @@ static u8 icl_calc_voltage_level(int cdclk)
static u8 ehl_calc_voltage_level(int cdclk)
{
- if (cdclk > 312000)
+ if (cdclk > 326400)
+ return 3;
+ else if (cdclk > 312000)
return 2;
else if (cdclk > 180000)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0d6e494b4508..c7c2b349858d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -593,7 +593,7 @@ struct tgl_dkl_phy_ddi_buf_trans {
u32 dkl_de_emphasis_control;
};
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
{ 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
@@ -607,6 +607,20 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
{ 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
};
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -898,7 +912,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
default_entry = n_entries - 1;
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
@@ -2371,7 +2385,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
- n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
@@ -2823,8 +2837,13 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
- n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
- ddi_translations = tgl_dkl_phy_ddi_translations;
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
+ } else {
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ ddi_translations = tgl_dkl_phy_dp_ddi_trans;
+ }
if (level >= n_entries)
level = n_entries - 1;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index c61ac0c3acb5..050655a1a3d8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5476,15 +5476,13 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
-static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port)
+static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
+ enum phy phy)
{
- enum port port = intel_dig_port->base.port;
-
- if (HAS_PCH_MCC(dev_priv) && port == PORT_C)
+ if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
- return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
+ return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
static bool icl_digital_port_connected(struct intel_encoder *encoder)
@@ -5494,7 +5492,7 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (intel_phy_is_combo(dev_priv, phy))
- return icl_combo_port_connected(dev_priv, dig_port);
+ return intel_combo_phy_connected(dev_priv, phy);
else if (intel_phy_is_tc(dev_priv, phy))
return intel_tc_port_connected(dig_port);
else
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e553ca8d98eb..337ba17b1e0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -368,7 +368,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
if (!ce->timeline)
return NULL;
- rcu_read_lock();
+ mutex_lock(&ce->timeline->mutex);
list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
if (i915_request_completed(rq))
break;
@@ -378,7 +378,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
if (engine)
break;
}
- rcu_read_unlock();
+ mutex_unlock(&ce->timeline->mutex);
return engine;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ee9d2bcd2c13..ef7bc41ffffa 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -310,10 +310,23 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
GEM_BUG_ON(rq->hw_context == ce);
if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
- err = mutex_lock_interruptible_nested(&tl->mutex,
- SINGLE_DEPTH_NESTING);
- if (err)
- return err;
+ /*
+ * Ideally, we just want to insert our foreign fence as
+	 * a barrier into the remote context, such that this operation
+ * occurs after all current operations in that context, and
+ * all future operations must occur after this.
+ *
+ * Currently, the timeline->last_request tracking is guarded
+ * by its mutex and so we must obtain that to atomically
+ * insert our barrier. However, since we already hold our
+ * timeline->mutex, we must be careful against potential
+ * inversion if we are the kernel_context as the remote context
+ * will itself poke at the kernel_context when it needs to
+ * unpin. Ergo, if already locked, we drop both locks and
+ * try again (through the magic of userspace repeating EAGAIN).
+ */
+ if (!mutex_trylock(&tl->mutex))
+ return -EAGAIN;
/* Queue this switch after current activity by this context. */
err = i915_active_fence_set(&tl->last_request, rq);
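[Review note] A minimal sketch of the trylock-and-bail pattern adopted above, assuming a hypothetical pair of contexts each guarded by its own mutex (none of these names are from the patch). Blocking on the remote mutex while holding our own invites an ABBA inversion, so the lock is only tried and the retry is pushed out to the caller via -EAGAIN:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct ctx {
        struct mutex mutex;
    };

    /* Called with self->mutex already held. */
    static int queue_remote_barrier(struct ctx *self, struct ctx *remote)
    {
        /*
         * A blocking mutex_lock(&remote->mutex) could deadlock against a
         * thread that holds remote->mutex and waits for self->mutex, so
         * only try the lock and let the caller drop its lock and retry.
         */
        if (!mutex_trylock(&remote->mutex))
            return -EAGAIN;

        /* ... insert the barrier into the remote timeline ... */

        mutex_unlock(&remote->mutex);
        return 0;
    }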
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index bc3b72bfa9e3..01765a7ec18f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -100,9 +100,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
- return READ_ONCE(*execlists->active);
+ return *READ_ONCE(execlists->active);
}
static inline void
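[Review note] The subtle change above moves the annotation from the slot (READ_ONCE(*execlists->active)) to the pointer itself (*READ_ONCE(execlists->active)): process_csb(), further down in this series, now retargets execlists->active between the inflight[] and pending[] arrays with WRITE_ONCE, so a reader must snapshot the pointer once before dereferencing. A hedged sketch of the pattern, with illustrative names:

    #include <linux/compiler.h>

    struct request;

    struct ports {
        struct request * const *active; /* -> inflight[] or pending[] */
    };

    static struct request *first_active(const struct ports *p)
    {
        /*
         * Snapshot the pointer once; a concurrent WRITE_ONCE may switch
         * p->active to the other array between two loads. Dereferencing
         * the local copy is then self-consistent for this caller.
         */
        struct request * const *port = READ_ONCE(p->active);

        return *port;
    }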
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5ca3ec911e50..813bd3a610d2 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -28,13 +28,13 @@
#include "i915_drv.h"
-#include "gt/intel_gt.h"
-
+#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_engine_user.h"
-#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"
@@ -616,6 +616,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_execlists(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
intel_engine_pool_init(&engine->pool);
@@ -838,6 +839,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
+ intel_engine_fini_retire(engine);
intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 874d82677179..c1dd0cd3efc7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -73,8 +73,42 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
+static void
+__queue_and_release_pm(struct i915_request *rq,
+ struct intel_timeline *tl,
+ struct intel_engine_cs *engine)
+{
+ struct intel_gt_timelines *timelines = &engine->gt->timelines;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * We have to serialise all potential retirement paths with our
+ * submission, as we don't want to underflow either the
+ * engine->wakeref.counter or our timeline->active_count.
+ *
+ * Equally, we cannot allow a new submission to start until
+	 * after we finish queueing, nor can we allow that submitter
+ * to retire us before we are ready!
+ */
+ spin_lock(&timelines->lock);
+
+ /* Let intel_gt_retire_requests() retire us (acquired under lock) */
+ if (!atomic_fetch_inc(&tl->active_count))
+ list_add_tail(&tl->link, &timelines->active_list);
+
+	/* Hand the request over to HW, and thereby to engine_retire() */
+ __i915_request_queue(rq, NULL);
+
+ /* Let new submissions commence (and maybe retire this timeline) */
+ __intel_wakeref_defer_park(&engine->wakeref);
+
+ spin_unlock(&timelines->lock);
+}
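[Review note] __queue_and_release_pm() leans on the "only the 0 -> 1 transition touches the list" idiom, using the old value returned by atomic_fetch_inc(). A minimal sketch of that idiom under an assumed spinlock (names are illustrative, not from the patch):

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct tracked {
        atomic_t active_count;
        struct list_head link;
    };

    static void tracked_activate(struct tracked *t, struct list_head *active,
                                 spinlock_t *lock)
    {
        spin_lock(lock);
        /* atomic_fetch_inc() returns the old count: 0 means first user. */
        if (!atomic_fetch_inc(&t->active_count))
            list_add_tail(&t->link, active);
        spin_unlock(lock);
    }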
+
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
+ struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
unsigned long flags;
bool result = true;
@@ -98,16 +132,31 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* This should hold true as we can only park the engine after
* retiring the last request, thus all rings should be empty and
* all timelines idle.
+ *
+ * For unlocking, there are 2 other parties and the GPU who have a
+ * stake here.
+ *
+ * A new gpu user will be waiting on the engine-pm to start their
+ * engine_unpark. New waiters are predicated on engine->wakeref.count
+ * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
+ * engine->wakeref.
+ *
+ * The other party is intel_gt_retire_requests(), which is walking the
+ * list of active timelines looking for completions. Meanwhile as soon
+ * as we call __i915_request_queue(), the GPU may complete our request.
+ * Ergo, if we put ourselves on the timelines.active_list
+	 * (see intel_timeline_enter()) before we increment the
+	 * engine->wakeref.count, we may see the request completion and retire
+	 * it causing an underflow of the engine->wakeref.
*/
- flags = __timeline_mark_lock(engine->kernel_context);
+ flags = __timeline_mark_lock(ce);
+ GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
- rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
+ rq = __i915_request_create(ce, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
goto out_unlock;
- intel_timeline_enter(i915_request_timeline(rq));
-
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
i915_request_add_active_barriers(rq);
@@ -116,13 +165,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_commit(rq);
- /* Release our exclusive hold on the engine */
- __intel_wakeref_defer_park(&engine->wakeref);
- __i915_request_queue(rq, NULL);
+ /* Expose ourselves to the world */
+ __queue_and_release_pm(rq, ce->timeline, engine);
result = false;
out_unlock:
- __timeline_mark_unlock(engine->kernel_context, flags);
+ __timeline_mark_unlock(ce, flags);
return result;
}
@@ -177,7 +225,8 @@ static int __engine_park(struct intel_wakeref *wf)
engine->execlists.no_priolist = false;
- intel_gt_pm_put(engine->gt);
+ /* While gt calls i915_vma_parked(), we have to break the lock cycle */
+ intel_gt_pm_put_async(engine->gt);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 739c50fefcef..24e20344dc22 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -31,6 +31,16 @@ static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
intel_wakeref_put(&engine->wakeref);
}
+static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put_async(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
+{
+ intel_wakeref_unlock_wait(&engine->wakeref);
+}
+
void intel_engine_init__pm(struct intel_engine_cs *engine);
#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 758f0e8ec672..17f1f1441efc 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -451,6 +451,14 @@ struct intel_engine_cs {
struct intel_engine_execlists execlists;
+ /*
+ * Keep track of completed timelines on this engine for early
+ * retirement with the goal of quickly enabling powersaving as
+ * soon as the engine is idle.
+ */
+ struct intel_timeline *retire;
+ struct work_struct retire_work;
+
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 6187cdd06646..a459a42ad5c2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -105,7 +105,6 @@ static int __gt_park(struct intel_wakeref *wf)
static const struct intel_wakeref_ops wf_ops = {
.get = __gt_unpark,
.put = __gt_park,
- .flags = INTEL_WAKEREF_PUT_ASYNC,
};
void intel_gt_pm_init_early(struct intel_gt *gt)
@@ -272,7 +271,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
static suspend_state_t pm_suspend_target(void)
{
-#if IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
return pm_suspend_target_state;
#else
return PM_SUSPEND_TO_IDLE;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b3e17399be9b..990efc27a4e4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -32,6 +32,11 @@ static inline void intel_gt_pm_put(struct intel_gt *gt)
intel_wakeref_put(&gt->wakeref);
}
+static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+{
+ intel_wakeref_put_async(&gt->wakeref);
+}
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 353809ac2754..3dc13ecf41bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/workqueue.h>
+
#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_gt.h"
@@ -29,6 +31,79 @@ static void flush_submission(struct intel_gt *gt)
intel_engine_flush_submission(engine);
}
+static void engine_retire(struct work_struct *work)
+{
+ struct intel_engine_cs *engine =
+ container_of(work, typeof(*engine), retire_work);
+ struct intel_timeline *tl = xchg(&engine->retire, NULL);
+
+ do {
+ struct intel_timeline *next = xchg(&tl->retire, NULL);
+
+ /*
+ * Our goal here is to retire _idle_ timelines as soon as
+ * possible (as they are idle, we do not expect userspace
+ * to be cleaning up anytime soon).
+ *
+ * If the timeline is currently locked, either it is being
+ * retired elsewhere or about to be!
+ */
+ if (mutex_trylock(&tl->mutex)) {
+ retire_requests(tl);
+ mutex_unlock(&tl->mutex);
+ }
+ intel_timeline_put(tl);
+
+ GEM_BUG_ON(!next);
+ tl = ptr_mask_bits(next, 1);
+ } while (tl);
+}
+
+static bool add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl)
+{
+ struct intel_timeline *first;
+
+ /*
+ * We open-code a llist here to include the additional tag [BIT(0)]
+ * so that we know when the timeline is already on a
+ * retirement queue: either this engine or another.
+ *
+	 * However, we rely on a timeline only ever being active on a single
+	 * engine at any one time, and on add_retire() being called before the
+	 * engine releases the timeline and it is transferred to another to retire.
+ */
+
+ if (READ_ONCE(tl->retire)) /* already queued */
+ return false;
+
+ intel_timeline_get(tl);
+ first = READ_ONCE(engine->retire);
+ do
+ tl->retire = ptr_pack_bits(first, 1, 1);
+ while (!try_cmpxchg(&engine->retire, &first, tl));
+
+ return !first;
+}
+
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl)
+{
+ if (add_retire(engine, tl))
+ schedule_work(&engine->retire_work);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine)
+{
+ INIT_WORK(&engine->retire_work, engine_retire);
+}
+
+void intel_engine_fini_retire(struct intel_engine_cs *engine)
+{
+ flush_work(&engine->retire_work);
+ GEM_BUG_ON(engine->retire);
+}
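[Review note] add_retire()/engine_retire() above open-code a tagged llist: the chain pointer always carries BIT(0), so even a list tail (next == NULL) is stored as a non-NULL value, which is what lets the unlocked READ_ONCE(tl->retire) test reliably mean "already queued". A sketch of the push/drain pair under assumed names:

    #include <linux/atomic.h>

    struct node {
        struct node *next_tagged; /* next | BIT(0), non-NULL while queued */
    };

    static bool retire_push(struct node **head, struct node *n)
    {
        struct node *first;

        if (READ_ONCE(n->next_tagged)) /* tag set: already on some queue */
            return false;

        first = READ_ONCE(*head);
        do
            /* NULL next is stored as 0x1, keeping the test above valid. */
            n->next_tagged = (struct node *)((unsigned long)first | 1);
        while (!try_cmpxchg(head, &first, n));

        return !first; /* first node on the queue: kick the worker */
    }

    static void retire_drain(struct node **head)
    {
        struct node *n = xchg(head, NULL);

        while (n) {
            struct node *next = xchg(&n->next_tagged, NULL);

            /* ... retire n, then drop the reference taken at push ... */

            n = (struct node *)((unsigned long)next & ~1ul);
        }
    }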
+
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
struct intel_gt_timelines *timelines = &gt->timelines;
@@ -52,8 +127,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
}
intel_timeline_get(tl);
- GEM_BUG_ON(!tl->active_count);
- tl->active_count++; /* pin the list element */
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
spin_unlock_irqrestore(&timelines->lock, flags);
if (timeout > 0) {
@@ -74,7 +149,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
/* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link);
- if (!--tl->active_count)
+ if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
else
active_count += !!rcu_access_pointer(tl->last_request.fence);
@@ -83,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
- GEM_BUG_ON(tl->active_count);
+ GEM_BUG_ON(atomic_read(&tl->active_count));
list_add(&tl->link, &free);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index bd31cbce47e0..d626fb115386 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -7,7 +7,9 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
+struct intel_engine_cs;
struct intel_gt;
+struct intel_timeline;
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
static inline void intel_gt_retire_requests(struct intel_gt *gt)
@@ -15,6 +17,11 @@ static inline void intel_gt_retire_requests(struct intel_gt *gt)
intel_gt_retire_requests_timeout(gt, 0);
}
+void intel_engine_init_retire(struct intel_engine_cs *engine);
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl);
+void intel_engine_fini_retire(struct intel_engine_cs *engine);
+
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
void intel_gt_init_requests(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0ac3b26674ad..9fdefbdc3546 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -142,6 +142,7 @@
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
@@ -1115,9 +1116,17 @@ __execlists_schedule_out(struct i915_request *rq,
* refrain from doing non-trivial work here.
*/
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ i915_request_completed(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
- intel_gt_pm_put(engine->gt);
+ intel_gt_pm_put_async(engine->gt);
/*
* If this is part of a virtual engine, its next request may
@@ -1937,16 +1946,17 @@ skip_submit:
static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
- struct i915_request * const *port, *rq;
+ struct i915_request * const *port;
- for (port = execlists->pending; (rq = *port); port++)
- execlists_schedule_out(rq);
+ for (port = execlists->pending; *port; port++)
+ execlists_schedule_out(*port);
memset(execlists->pending, 0, sizeof(execlists->pending));
- for (port = execlists->active; (rq = *port); port++)
- execlists_schedule_out(rq);
- execlists->active =
- memset(execlists->inflight, 0, sizeof(execlists->inflight));
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ execlists_schedule_out(*port);
+ WRITE_ONCE(execlists->active,
+ memset(execlists->inflight, 0, sizeof(execlists->inflight)));
}
static inline void
@@ -2099,23 +2109,27 @@ static void process_csb(struct intel_engine_cs *engine)
else
promote = gen8_csb_parse(execlists, buf + 2 * head);
if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+ set_timeslice(engine);
+
if (!inject_preempt_hang(execlists))
ring_set_paused(engine, 0);
/* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", execlists->active);
- while (*execlists->active)
- execlists_schedule_out(*execlists->active++);
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ execlists_schedule_out(*old++);
/* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- execlists->active =
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending));
-
- set_timeslice(engine);
+ WRITE_ONCE(execlists->active,
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending)));
WRITE_ONCE(execlists->pending[0], NULL);
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index f03e000051c1..c97423a76642 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1114,7 +1114,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
- intel_engine_pm_put(engine);
+ intel_engine_pm_put_async(engine);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index ece20504d240..374b28f13ca0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -57,9 +57,10 @@ int intel_ring_pin(struct intel_ring *ring)
i915_vma_make_unshrinkable(vma);
- GEM_BUG_ON(ring->vaddr);
- ring->vaddr = addr;
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->emit);
+ ring->vaddr = addr;
return 0;
err_ring:
@@ -85,20 +86,14 @@ void intel_ring_unpin(struct intel_ring *ring)
if (!atomic_dec_and_test(&ring->pin_count))
return;
- /* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->emit);
-
i915_vma_unset_ggtt_write(vma);
if (i915_vma_is_map_and_fenceable(vma))
i915_vma_unpin_iomap(vma);
else
i915_gem_object_unpin_map(vma->obj);
- GEM_BUG_ON(!ring->vaddr);
- ring->vaddr = NULL;
-
- i915_vma_unpin(vma);
i915_vma_make_purgeable(vma);
+ i915_vma_unpin(vma);
}
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 14ad10acd548..649798c184fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -282,6 +282,7 @@ void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
+ GEM_BUG_ON(timeline->retire);
if (timeline->hwsp_cacheline)
cacheline_free(timeline->hwsp_cacheline);
@@ -339,15 +340,33 @@ void intel_timeline_enter(struct intel_timeline *tl)
struct intel_gt_timelines *timelines = &tl->gt->timelines;
unsigned long flags;
+ /*
+ * Pretend we are serialised by the timeline->mutex.
+ *
+ * While generally true, there are a few exceptions to the rule
+ * for the engine->kernel_context being used to manage power
+ * transitions. As the engine_park may be called from under any
+ * timeline, it uses the power mutex as a global serialisation
+ * lock to prevent any other request entering its timeline.
+ *
+ * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
+ *
+	 * However, intel_gt_retire_requests() does not know which engine
+	 * it is retiring for and so cannot partake in the engine-pm
+ * barrier, and there we use the tl->active_count as a means to
+ * pin the timeline in the active_list while the locks are dropped.
+ * Ergo, as that is outside of the engine-pm barrier, we need to
+ * use atomic to manipulate tl->active_count.
+ */
lockdep_assert_held(&tl->mutex);
-
GEM_BUG_ON(!atomic_read(&tl->pin_count));
- if (tl->active_count++)
+
+ if (atomic_add_unless(&tl->active_count, 1, 0))
return;
- GEM_BUG_ON(!tl->active_count); /* overflow? */
spin_lock_irqsave(&timelines->lock, flags);
- list_add(&tl->link, &timelines->active_list);
+ if (!atomic_fetch_inc(&tl->active_count))
+ list_add_tail(&tl->link, &timelines->active_list);
spin_unlock_irqrestore(&timelines->lock, flags);
}
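[Review note] intel_timeline_enter() above (and intel_timeline_exit() in the next hunk) pair the 0 -> 1 transition idiom with an unlocked fast path: atomic_add_unless() skips the spinlock entirely unless the count may cross the zero boundary, and the crossing is re-validated under the lock. A condensed sketch with illustrative names:

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct tracked {
        atomic_t active_count;
        struct list_head link;
    };

    static void tracked_enter(struct tracked *t, struct list_head *active,
                              spinlock_t *lock)
    {
        /* Fast path: already active, no lock required. */
        if (atomic_add_unless(&t->active_count, 1, 0))
            return;

        spin_lock(lock);
        if (!atomic_fetch_inc(&t->active_count)) /* recheck under the lock */
            list_add_tail(&t->link, active);
        spin_unlock(lock);
    }

    static void tracked_exit(struct tracked *t, spinlock_t *lock)
    {
        /* Fast path: not the final reference, no lock required. */
        if (atomic_add_unless(&t->active_count, -1, 1))
            return;

        spin_lock(lock);
        if (atomic_dec_and_test(&t->active_count))
            list_del(&t->link);
        spin_unlock(lock);
    }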
@@ -356,14 +375,16 @@ void intel_timeline_exit(struct intel_timeline *tl)
struct intel_gt_timelines *timelines = &tl->gt->timelines;
unsigned long flags;
+ /* See intel_timeline_enter() */
lockdep_assert_held(&tl->mutex);
- GEM_BUG_ON(!tl->active_count);
- if (--tl->active_count)
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ if (atomic_add_unless(&tl->active_count, -1, 1))
return;
spin_lock_irqsave(&timelines->lock, flags);
- list_del(&tl->link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
spin_unlock_irqrestore(&timelines->lock, flags);
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 98d9ee166379..aaf15cbe1ce1 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -42,7 +42,7 @@ struct intel_timeline {
* from the intel_context caller plus internal atomicity.
*/
atomic_t pin_count;
- unsigned int active_count;
+ atomic_t active_count;
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
@@ -66,6 +66,9 @@ struct intel_timeline {
*/
struct i915_active_fence last_request;
+ /** A chain of completed timelines ready for early retirement. */
+ struct intel_timeline *retire;
+
/**
* We track the most recent seqno that we wait on in every context so
* that we only have to emit a new await and dependency on a more
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 20b9c83f43ad..cbf6b0735272 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -51,11 +51,12 @@ static int live_engine_pm(void *arg)
pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
engine->name, p->name);
else
- intel_engine_pm_put(engine);
- intel_engine_pm_put(engine);
+ intel_engine_pm_put_async(engine);
+ intel_engine_pm_put_async(engine);
p->critical_section_end();
- /* engine wakeref is sync (instant) */
+ intel_engine_pm_flush(engine);
+
if (intel_engine_pm_is_awake(engine)) {
pr_err("%s is still awake after flushing pm\n",
engine->name);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6a3ac8cde95d..21a176cd8acc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1599,9 +1599,9 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
if (!(cmd_val(s, 0) & (1 << 22)))
return ret;
- /* check if QWORD */
- if (DWORD_FIELD(0, 20, 19) == 1)
- valid_len += 8;
+ /* check inline data */
+ if (cmd_val(s, 0) & BIT(18))
+ valid_len = CMD_LEN(9);
ret = gvt_check_valid_cmd_length(cmd_length(s),
valid_len);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bd12af349123..bb9fe6bf5275 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -460,6 +460,7 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static i915_reg_t force_nonpriv_white_list[] = {
GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+ PS_INVOCATION_COUNT,//_MMIO(0x2348)
GEN8_CS_CHICKEN1,//_MMIO(0x2580)
_MMIO(0x2690),
_MMIO(0x2694),
@@ -508,7 +509,7 @@ static inline bool in_whitelist(unsigned int reg)
static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- u32 reg_nonpriv = *(u32 *)p_data;
+ u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
u32 ring_base;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -528,7 +529,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
bytes);
} else
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
- vgpu->id, reg_nonpriv, offset);
+ vgpu->id, *(u32 *)p_data, offset);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 3c424cb90702..a19e7d89bc8a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -672,12 +672,13 @@ void i915_active_acquire_barrier(struct i915_active *ref)
* populated by i915_request_add_active_barriers() to point to the
* request that will eventually release them.
*/
- spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
struct active_node *node = barrier_from_ll(pos);
struct intel_engine_cs *engine = barrier_to_engine(node);
struct rb_node **p, *parent;
+ spin_lock_irqsave_nested(&ref->tree_lock, flags,
+ SINGLE_DEPTH_NESTING);
parent = NULL;
p = &ref->tree.rb_node;
while (*p) {
@@ -693,12 +694,12 @@ void i915_active_acquire_barrier(struct i915_active *ref)
}
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- spin_unlock_irqrestore(&ref->tree_lock, flags);
}
void i915_request_add_active_barriers(struct i915_request *rq)
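[Review note] The i915_active_acquire_barrier() rework above shrinks the irq-off critical section from "whole loop" to "one tree insertion", so the engine-pm put at the bottom of each iteration runs with the lock dropped. A generic sketch of the shape, with hypothetical helpers standing in for the rb-tree insert and the wakeref release:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
        struct list_head link;
    };

    static void insert_one(struct item *it) { /* rb-tree insert stand-in */ }
    static void put_one(struct item *it)    { /* engine-pm put stand-in */ }

    static void insert_all(struct list_head *nodes, spinlock_t *lock)
    {
        struct item *it, *next;
        unsigned long flags;

        list_for_each_entry_safe(it, next, nodes, link) {
            /* Hold the irq-off lock only across the insertion... */
            spin_lock_irqsave(lock, flags);
            insert_one(it);
            spin_unlock_irqrestore(lock, flags);

            /* ...so the heavier put runs outside the critical section. */
            put_one(it);
        }
    }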
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 0d40dccd1409..2814218c5ba1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -190,7 +190,7 @@ static u64 get_rc6(struct intel_gt *gt)
val = 0;
if (intel_gt_pm_get_if_awake(gt)) {
val = __get_rc6(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put_async(gt);
}
spin_lock_irqsave(&pmu->lock, flags);
@@ -343,7 +343,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
skip:
spin_unlock_irqrestore(&engine->uncore->lock, flags);
- intel_engine_pm_put(engine);
+ intel_engine_pm_put_async(engine);
}
}
@@ -368,7 +368,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (intel_gt_pm_get_if_awake(gt)) {
val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
val = intel_get_cagf(rps, val);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put_async(gt);
}
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index c27cfef9281c..ef25ce6e395e 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -103,15 +103,18 @@ query_engine_info(struct drm_i915_private *i915,
struct drm_i915_engine_info __user *info_ptr;
struct drm_i915_query_engine_info query;
struct drm_i915_engine_info info = { };
+ unsigned int num_uabi_engines = 0;
struct intel_engine_cs *engine;
int len, ret;
if (query_item->flags)
return -EINVAL;
+ for_each_uabi_engine(engine, i915)
+ num_uabi_engines++;
+
len = sizeof(struct drm_i915_query_engine_info) +
- RUNTIME_INFO(i915)->num_engines *
- sizeof(struct drm_i915_engine_info);
+ num_uabi_engines * sizeof(struct drm_i915_engine_info);
ret = copy_query_item(&query, sizeof(query), len, query_item);
if (ret != 0)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 868cc78048d0..59aa1b6f1827 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -54,7 +54,8 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- if (!atomic_dec_and_test(&wf->count))
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ if (unlikely(!atomic_dec_and_test(&wf->count)))
goto unlock;
/* ops->put() must reschedule its own release on error/deferral */
@@ -67,13 +68,12 @@ unlock:
mutex_unlock(&wf->mutex);
}
-void __intel_wakeref_put_last(struct intel_wakeref *wf)
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
/* Assume we are not in process context and so cannot sleep. */
- if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
- !mutex_trylock(&wf->mutex)) {
+ if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
schedule_work(&wf->work);
return;
}
@@ -109,8 +109,17 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
- return wait_var_event_killable(&wf->wakeref,
- !intel_wakeref_is_active(wf));
+ int err;
+
+ might_sleep();
+
+ err = wait_var_event_killable(&wf->wakeref,
+ !intel_wakeref_is_active(wf));
+ if (err)
+ return err;
+
+ intel_wakeref_unlock_wait(wf);
+ return 0;
}
static void wakeref_auto_timeout(struct timer_list *t)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 5f0c972a80fb..da6e8fd506e6 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -9,6 +9,7 @@
#include <linux/atomic.h>
#include <linux/bits.h>
+#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
@@ -29,9 +30,6 @@ typedef depot_stack_handle_t intel_wakeref_t;
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
-
- unsigned long flags;
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
};
struct intel_wakeref {
@@ -57,7 +55,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
-void __intel_wakeref_put_last(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
/**
* intel_wakeref_get: Acquire the wakeref
@@ -100,10 +98,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
}
/**
- * intel_wakeref_put: Release the wakeref
- * @i915: the drm_i915_private device
+ * __intel_wakeref_put: Release the wakeref
* @wf: the wakeref
- * @fn: callback for releasing the wakeref, called only on final release.
+ * @flags: control flags
*
* Release our hold on the wakeref. When there are no more users,
* the runtime pm wakeref will be released after the @fn callback is called
@@ -116,11 +113,25 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
* code otherwise.
*/
static inline void
-intel_wakeref_put(struct intel_wakeref *wf)
+__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
{
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
- __intel_wakeref_put_last(wf);
+ __intel_wakeref_put_last(wf, flags);
+}
+
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
+{
+ might_sleep();
+ __intel_wakeref_put(wf, 0);
+}
+
+static inline void
+intel_wakeref_put_async(struct intel_wakeref *wf)
+{
+ __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
/**
@@ -152,6 +163,21 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
}
/**
+ * intel_wakeref_unlock_wait: Wait until the active callback is complete
+ * @wf: the wakeref
+ *
+ * Waits until the active callback (under the @wf->mutex or on another
+ * CPU) is complete.
+ */
+static inline void
+intel_wakeref_unlock_wait(struct intel_wakeref *wf)
+{
+ mutex_lock(&wf->mutex);
+ mutex_unlock(&wf->mutex);
+ flush_work(&wf->work);
+}
+
+/**
* intel_wakeref_is_active: Query whether the wakeref is currently held
* @wf: the wakeref
*
@@ -170,6 +196,7 @@ intel_wakeref_is_active(const struct intel_wakeref *wf)
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
+ lockdep_assert_held(&wf->mutex);
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
atomic_set_release(&wf->count, 1);
}
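[Review note] The split introduced above is the heart of this patch: intel_wakeref_put() keeps the synchronous behaviour (it may take wf->mutex and run the release callback, hence the might_sleep()), while intel_wakeref_put_async() always defers the final release to wf->work, making it safe from atomic context; intel_wakeref_unlock_wait() is the matching flush point. A short usage sketch against the API introduced above:

    #include "intel_wakeref.h"

    /* Process context: the final release may run synchronously here. */
    static void put_from_process_ctx(struct intel_wakeref *wf)
    {
        intel_wakeref_put(wf);
    }

    /* Hard/soft irq or under a spinlock: defer the release to wf->work. */
    static void put_from_atomic_ctx(struct intel_wakeref *wf)
    {
        intel_wakeref_put_async(wf);
    }

    /* Before asserting the wakeref is idle, flush any deferred release. */
    static void flush_release(struct intel_wakeref *wf)
    {
        intel_wakeref_unlock_wait(wf); /* lock barrier + flush_work() */
    }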
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 397f8b0a9af8..d43951caeea0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -30,6 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
static struct drm_driver driver;
static const struct pci_device_id pciidlist[] = {
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
+ G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
@@ -60,6 +62,35 @@ static void mga_pci_remove(struct pci_dev *pdev)
DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
+static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
+{
+ return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
+}
+
+int mgag200_driver_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct mga_device *mdev = dev->dev_private;
+ unsigned long pg_align;
+
+ if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
+ return -EINVAL;
+
+ pg_align = 0ul;
+
+ /*
+ * Aligning scanout buffers to the size of the video ram forces
+ * placement at offset 0. Works around a bug where HW does not
+ * respect 'startadd' field.
+ */
+ if (mgag200_pin_bo_at_0(mdev))
+ pg_align = PFN_UP(mdev->mc.vram_size);
+
+ return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev,
+ pg_align, false, args);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = mgag200_driver_load,
@@ -71,7 +102,10 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- DRM_GEM_VRAM_DRIVER
+ .debugfs_init = drm_vram_mm_debugfs_init,
+ .dumb_create = mgag200_driver_dumb_create,
+ .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 0ea9a525e57d..aa32aad222c2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -150,6 +150,12 @@ enum mga_type {
G200_EW3,
};
+/* HW does not handle 'startadd' field correctly. */
+#define MGAG200_FLAG_HW_BUG_NO_STARTADD (1ul << 8)
+
+#define MGAG200_TYPE_MASK (0x000000ff)
+#define MGAG200_FLAG_MASK (0x00ffff00)
+
#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
struct mga_device {
@@ -181,6 +187,18 @@ struct mga_device {
u32 unique_rev_id;
};
+static inline enum mga_type
+mgag200_type_from_driver_data(kernel_ulong_t driver_data)
+{
+ return (enum mga_type)(driver_data & MGAG200_TYPE_MASK);
+}
+
+static inline unsigned long
+mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
+{
+ return driver_data & MGAG200_FLAG_MASK;
+}
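[Review note] With the masks above, driver_data now carries two fields at once: the chip type in bits 7:0 and the HW-bug flags in bits 23:8, split apart again by the two helpers. A worked example (the wrapper function is illustrative):

    static void decode_example(void)
    {
        /* driver_data as packed by the PCI_VENDOR_ID_SUN entry above: */
        kernel_ulong_t data = G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD;

        enum mga_type type = mgag200_type_from_driver_data(data);
        unsigned long flags = mgag200_flags_from_driver_data(data);

        /* type == G200_SE_A, flags == MGAG200_FLAG_HW_BUG_NO_STARTADD */
        (void)type;
        (void)flags;
    }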
+
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
void mgag200_modeset_fini(struct mga_device *mdev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 5f74aabcd3df..e1bc5b0aa774 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -94,7 +94,8 @@ static int mgag200_device_init(struct drm_device *dev,
struct mga_device *mdev = dev->dev_private;
int ret, option;
- mdev->type = flags;
+ mdev->flags = mgag200_flags_from_driver_data(flags);
+ mdev->type = mgag200_type_from_driver_data(flags);
/* Hardcode the number of CRTCs to 1 */
mdev->num_crtc = 1;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index e9160ce39cbb..6deaa7d01654 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -7,6 +7,7 @@ config DRM_MSM
depends on OF && COMMON_CLK
depends on MMU
depends on INTERCONNECT || !INTERCONNECT
+ depends on QCOM_OCMEM || QCOM_OCMEM=n
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 5f7e98028eaf..7ad14937fcdf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -6,10 +6,6 @@
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
-#ifdef CONFIG_MSM_OCMEM
-# include <mach/ocmem.h>
-#endif
-
#include "a3xx_gpu.h"
#define A3XX_INT0_MASK \
@@ -195,9 +191,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
/* Set the OCMEM base address for A330, etc */
- if (a3xx_gpu->ocmem_hdl) {
+ if (a3xx_gpu->ocmem.hdl) {
gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
- (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+ (unsigned int)(a3xx_gpu->ocmem.base >> 14));
}
/* Turn on performance counters: */
@@ -318,10 +314,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
-#ifdef CONFIG_MSM_OCMEM
- if (a3xx_gpu->ocmem_base)
- ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
-#endif
+ adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
kfree(a3xx_gpu);
}
@@ -494,17 +487,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
/* if needed, allocate gmem: */
if (adreno_is_a330(adreno_gpu)) {
-#ifdef CONFIG_MSM_OCMEM
- /* TODO this is different/missing upstream: */
- struct ocmem_buf *ocmem_hdl =
- ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
-
- a3xx_gpu->ocmem_hdl = ocmem_hdl;
- a3xx_gpu->ocmem_base = ocmem_hdl->addr;
- adreno_gpu->gmem = ocmem_hdl->len;
- DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
- a3xx_gpu->ocmem_base);
-#endif
+ ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
+ adreno_gpu, &a3xx_gpu->ocmem);
+ if (ret)
+ goto fail;
}
if (!gpu->aspace) {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 5dc33e5ea53b..c555fb13e0d7 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -19,8 +19,7 @@ struct a3xx_gpu {
struct adreno_gpu base;
/* if OCMEM is used for GMEM: */
- uint32_t ocmem_base;
- void *ocmem_hdl;
+ struct adreno_ocmem ocmem;
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index ab2b752566d8..b01388a9e89e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -2,9 +2,6 @@
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#include "a4xx_gpu.h"
-#ifdef CONFIG_MSM_OCMEM
-# include <soc/qcom/ocmem.h>
-#endif
#define A4XX_INT0_MASK \
(A4XX_INT0_RBBM_AHB_ERROR | \
@@ -188,7 +185,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
(1 << 30) | 0xFFFF);
gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
- (unsigned int)(a4xx_gpu->ocmem_base >> 14));
+ (unsigned int)(a4xx_gpu->ocmem.base >> 14));
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
@@ -318,10 +315,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
-#ifdef CONFIG_MSM_OCMEM
- if (a4xx_gpu->ocmem_base)
- ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
-#endif
+ adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
kfree(a4xx_gpu);
}
@@ -578,17 +572,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
-#ifdef CONFIG_MSM_OCMEM
- /* TODO this is different/missing upstream: */
- struct ocmem_buf *ocmem_hdl =
- ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
-
- a4xx_gpu->ocmem_hdl = ocmem_hdl;
- a4xx_gpu->ocmem_base = ocmem_hdl->addr;
- adreno_gpu->gmem = ocmem_hdl->len;
- DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
- a4xx_gpu->ocmem_base);
-#endif
+ ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+ &a4xx_gpu->ocmem);
+ if (ret)
+ goto fail;
}
if (!gpu->aspace) {
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index d506311ee240..a01448cba2ea 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -16,8 +16,7 @@ struct a4xx_gpu {
struct adreno_gpu base;
/* if OCMEM is used for GMEM: */
- uint32_t ocmem_base;
- void *ocmem_hdl;
+ struct adreno_ocmem ocmem;
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e9c55d1d6c04..b02e2042547f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -353,6 +353,9 @@ static int a5xx_me_init(struct msm_gpu *gpu)
* 2D mode 3 draw
*/
OUT_RING(ring, 0x0000000B);
+ } else if (adreno_is_a510(adreno_gpu)) {
+ /* Workaround for token and syncs */
+ OUT_RING(ring, 0x00000001);
} else {
/* No workarounds enabled */
OUT_RING(ring, 0x00000000);
@@ -568,15 +571,24 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
- if (adreno_is_a530(adreno_gpu))
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
- if (adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
- gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
- gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
-
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+ if (adreno_is_a510(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ } else {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x400 << 11 | 0x300 << 22));
+ }
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
@@ -589,6 +601,19 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Enable ME/PFP split notification */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+ /*
+	 * In A5x, the CCU can send a context_done event for a particular
+	 * context to UCHE, which ultimately reaches the CP, even while there
+	 * is still a valid transaction for that context inside the CCU. This
+	 * can let the CP program config registers, which makes the "valid
+	 * transaction" inside the CCU be interpreted differently. This can
+	 * cause a GPU fault. The bug is fixed in the latest A510 revision;
+	 * to enable the fix, bit[11] of RB_DBG_ECO_CNTL needs to be set to 0
+	 * (the default is 1, disabled). On older A510 revisions this bit is unused.
+ */
+ if (adreno_is_a510(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
+
/* Enable HWCG */
a5xx_set_hwcg(gpu, true);
@@ -635,7 +660,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a530(adreno_gpu))
+ if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
@@ -679,7 +704,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
a5xx_preempt_hw_init(gpu);
- a5xx_gpmu_ucode_init(gpu);
+ if (!adreno_is_a510(adreno_gpu))
+ a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
if (ret)
@@ -712,7 +738,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
}
/*
- * Try to load a zap shader into the secure world. If successful
+ * If the chip that we are using does support loading one, then
+ * try to load a zap shader into the secure world. If successful
* we can use the CP to switch out of secure mode. If not then we
* have no resource but to try to switch ourselves out manually. If we
* guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
@@ -1066,6 +1093,7 @@ static void a5xx_dump(struct msm_gpu *gpu)
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Turn on the core power */
@@ -1073,6 +1101,15 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (adreno_is_a510(adreno_gpu)) {
+ /* Halt the sp_input_clk at HM level */
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
+ a5xx_set_hwcg(gpu, true);
+ /* Turn on sp_input_clk at HM level */
+ gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
+ return 0;
+ }
+
/* Turn the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
@@ -1101,9 +1138,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 mask = 0xf;
+
+ /* A510 has 3 XIN ports in VBIF */
+ if (adreno_is_a510(adreno_gpu))
+ mask = 0x7;
+
/* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
- spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
+ mask) == mask);
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
@@ -1289,7 +1334,7 @@ static void a5xx_gpu_state_destroy(struct kref *kref)
kfree(a5xx_state);
}
-int a5xx_gpu_state_put(struct msm_gpu_state *state)
+static int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
if (IS_ERR_OR_NULL(state))
return 1;
@@ -1299,8 +1344,8 @@ int a5xx_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
- struct drm_printer *p)
+static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
{
int i, j;
u32 pos = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index a3a06db675ba..321a8061fd32 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -297,6 +297,10 @@ int a5xx_power_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
+ /* Not all A5xx chips have a GPMU */
+ if (adreno_is_a510(adreno_gpu))
+ return 0;
+
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
@@ -326,6 +330,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
+ if (adreno_is_a510(adreno_gpu))
+ return;
+
if (a5xx_gpu->gpmu_bo)
return;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0888e0df660d..fbbdf86504f5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -115,6 +115,21 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 1, 0, ANY_ID),
+ .revn = 510,
+ .name = "A510",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .init = a5xx_gpu_init,
+ }, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 048c8be426f3..0783e4b5486a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -14,6 +14,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -893,6 +894,45 @@ static int adreno_get_pwrlevels(struct device *dev,
return 0;
}
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *adreno_ocmem)
+{
+ struct ocmem_buf *ocmem_hdl;
+ struct ocmem *ocmem;
+
+ ocmem = of_get_ocmem(dev);
+ if (IS_ERR(ocmem)) {
+ if (PTR_ERR(ocmem) == -ENODEV) {
+ /*
+ * Return success since either the ocmem property was
+ * not specified in device tree, or ocmem support is
+ * not compiled into the kernel.
+ */
+ return 0;
+ }
+
+ return PTR_ERR(ocmem);
+ }
+
+ ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
+ if (IS_ERR(ocmem_hdl))
+ return PTR_ERR(ocmem_hdl);
+
+ adreno_ocmem->ocmem = ocmem;
+ adreno_ocmem->base = ocmem_hdl->addr;
+ adreno_ocmem->hdl = ocmem_hdl;
+ adreno_gpu->gmem = ocmem_hdl->len;
+
+ return 0;
+}
+
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
+{
+ if (adreno_ocmem && adreno_ocmem->base)
+ ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
+ adreno_ocmem->hdl);
+}
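[Review note] Usage follows directly from the a3xx/a4xx hunks above, and the two helpers are shaped so callers need no #ifdef or NULL checks: of_get_ocmem() returning -ENODEV (no DT property, or OCMEM support not built in) is swallowed as success with adreno_ocmem->base left zero, and adreno_gpu_ocmem_cleanup() no-ops in that case. The caller-side pattern, as in a3xx_gpu_init()/a3xx_destroy():

    /* init path (cf. a3xx_gpu_init() above): */
    if (adreno_is_a330(adreno_gpu)) {
        ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
                                    adreno_gpu, &a3xx_gpu->ocmem);
        if (ret) /* only real failures reach here; -ENODEV maps to 0 */
            goto fail;
    }

    /* destroy path: unconditional, safe when OCMEM was never allocated. */
    adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);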
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c7441fb8313e..e71a7570ef72 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -126,6 +126,12 @@ struct adreno_gpu {
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+struct adreno_ocmem {
+ struct ocmem *ocmem;
+ unsigned long base;
+ void *hdl;
+};
+
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
@@ -206,6 +212,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a510(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 510;
+}
+
static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
@@ -236,6 +247,10 @@ void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *ocmem);
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index cdbea38b8697..f1bc6a1af7a7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -55,8 +55,7 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
enum dpu_intr_type intr_type, u32 instance_idx)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
return -EINVAL;
return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
@@ -73,7 +72,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
unsigned long irq_flags;
int ret = 0, enable_count;
- if (!dpu_kms || !dpu_kms->hw_intr ||
+ if (!dpu_kms->hw_intr ||
!dpu_kms->irq_obj.enable_counts ||
!dpu_kms->irq_obj.irq_counts) {
DPU_ERROR("invalid params\n");
@@ -114,7 +113,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
- if (!dpu_kms || !irq_idxs || !irq_count) {
+ if (!irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -138,7 +137,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
int ret = 0, enable_count;
- if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -169,7 +168,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
- if (!dpu_kms || !irq_idxs || !irq_count) {
+ if (!irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -186,7 +185,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
+ if (!dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.get_interrupt_status)
return 0;
@@ -205,7 +204,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
{
unsigned long irq_flags;
- if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ if (!dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -240,7 +239,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
{
unsigned long irq_flags;
- if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ if (!dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -274,8 +273,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.clear_all_irqs)
+ if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
return;
dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
@@ -283,8 +281,7 @@ static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.disable_all_irqs)
+ if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
return;
dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
@@ -343,18 +340,8 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
- struct msm_drm_private *priv;
int i;
- if (!dpu_kms->dev) {
- DPU_ERROR("invalid drm device\n");
- return;
- } else if (!dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid device private\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_clear_all_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
@@ -379,18 +366,8 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
- struct msm_drm_private *priv;
int i;
- if (!dpu_kms->dev) {
- DPU_ERROR("invalid drm device\n");
- return;
- } else if (!dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid device private\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 09a49b59bb5b..11f2bebe3869 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -32,18 +32,7 @@ enum dpu_perf_mode {
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
-
- if (!crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid device\n");
- return NULL;
- }
-
priv = crtc->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid kms\n");
- return NULL;
- }
-
return to_dpu_kms(priv->kms);
}
@@ -116,7 +105,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms || !kms->catalog) {
+ if (!kms->catalog) {
DPU_ERROR("invalid parameters\n");
return 0;
}
@@ -215,7 +204,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
if (!crtc) {
@@ -224,13 +212,12 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms || !kms->catalog) {
+ if (!kms->catalog) {
DPU_ERROR("invalid kms\n");
return;
}
dpu_crtc = to_dpu_crtc(crtc);
- dpu_cstate = to_dpu_crtc_state(crtc->state);
if (atomic_dec_return(&kms->bandwidth_ref) > 0)
return;
@@ -287,7 +274,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
- struct msm_drm_private *priv;
struct dpu_kms *kms;
int ret;
@@ -297,11 +283,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms || !kms->catalog) {
+ if (!kms->catalog) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
- priv = kms->dev->dev_private;
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index ce59adff06aa..f197dce54576 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -266,11 +266,20 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- if (!crtc || !crtc->dev) {
+ if (!crtc) {
DPU_ERROR("invalid crtc\n");
return INTF_MODE_NONE;
}
+ /*
+ * TODO: This function is called from dpu debugfs and as part of atomic
+ * check. When called from debugfs, the crtc->mutex must be held to
+ * read crtc->state. However reading crtc->state from atomic check isn't
+ * allowed (unless you have a good reason, a big comment, and a deep
+ * understanding of how the atomic/modeset locks work (<- and this is
+ * probably not possible)). So we'll keep the WARN_ON here for now, but
+ * really we need to figure out a better way to track our operating mode
+ */
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* TODO: Returns the first INTF_MODE, could there be multiple values? */
@@ -694,7 +703,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
unsigned long flags;
bool release_bandwidth = false;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ if (!crtc || !crtc->state) {
DPU_ERROR("invalid crtc\n");
return;
}
@@ -766,7 +775,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
struct msm_drm_private *priv;
bool request_bandwidth;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
@@ -1288,13 +1297,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
{
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
- struct msm_drm_private *priv = NULL;
- struct dpu_kms *kms = NULL;
int i;
- priv = dev->dev_private;
- kms = to_dpu_kms(priv->kms);
-
dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
if (!dpu_crtc)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index d82ea994063f..f96e142c4361 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -645,11 +645,6 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- }
-
hw_mdptop = dpu_kms->hw_mdp;
if (!hw_mdptop) {
DPU_ERROR("invalid mdptop\n");
@@ -735,8 +730,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
struct msm_drm_private *priv;
bool is_vid_mode = false;
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
- !drm_enc->crtc) {
+ if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
@@ -1092,17 +1086,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc || !drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
@@ -1184,7 +1174,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- struct drm_display_mode *mode;
int i = 0;
if (!drm_enc) {
@@ -1193,9 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
} else if (!drm_enc->dev) {
DPU_ERROR("invalid dev\n");
return;
- } else if (!drm_enc->dev->dev_private) {
- DPU_ERROR("invalid dev_private\n");
- return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1204,8 +1190,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
- mode = &drm_enc->crtc->state->adjusted_mode;
-
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1734,8 +1718,7 @@ static void dpu_encoder_vsync_event_handler(struct timer_list *t)
struct msm_drm_private *priv;
struct msm_drm_thread *event_thread;
- if (!drm_enc->dev || !drm_enc->dev->dev_private ||
- !drm_enc->crtc) {
+ if (!drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return;
}
@@ -1914,8 +1897,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
int i;
static const struct file_operations debugfs_status_fops = {
@@ -1927,14 +1908,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
- priv = drm_enc->dev->dev_private;
- dpu_kms = to_dpu_kms(priv->kms);
-
snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
/* create overall sub-directory for the encoder */
@@ -2042,9 +2020,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
enum dpu_intf_type intf_type;
struct dpu_enc_phys_init_params phys_params;
- if (!dpu_enc || !dpu_kms) {
- DPU_ERROR("invalid arg(s), enc %d kms %d\n",
- dpu_enc != 0, dpu_kms != 0);
+ if (!dpu_enc) {
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
return -EINVAL;
}
@@ -2133,14 +2110,12 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
frame_done_timer);
struct drm_encoder *drm_enc = &dpu_enc->base;
- struct msm_drm_private *priv;
u32 event;
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
- priv = drm_enc->dev->dev_private;
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 2923b63d95fe..047960949fbb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -124,13 +124,11 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc || !phys_enc->hw_ctl)
return;
DPU_ATRACE_BEGIN("ctl_start_irq");
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
@@ -316,13 +314,9 @@ end:
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- struct dpu_encoder_phys_cmd *cmd_enc;
-
if (!phys_enc)
return;
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
@@ -355,7 +349,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
struct drm_display_mode *mode;
bool tc_enable = true;
u32 vsync_hz;
- struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!phys_enc || !phys_enc->hw_pp) {
@@ -373,11 +366,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
}
dpu_kms = phys_enc->dpu_kms;
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid device\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
/*
* TE default: dsi byte clock calculated base on 70 fps;
@@ -650,13 +638,10 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
int rc;
- struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return -EINVAL;
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index b9c84fb4d4a1..3123ef873cdf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -374,7 +374,7 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (!phys_enc || !phys_enc->dpu_kms) {
+ if (!phys_enc) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
@@ -566,16 +566,13 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
unsigned long lock_flags;
int ret;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
- priv = phys_enc->parent->dev->dev_private;
if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 58b0485dc375..6c92f0fbeac9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -30,10 +30,6 @@
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
-static const char * const iommu_ports[] = {
- "mdp_0",
-};
-
/*
* To enable overall DRM driver logging
* # echo 0x2 > /sys/module/drm/parameters/debug
@@ -68,16 +64,14 @@ static int _dpu_danger_signal_status(struct seq_file *s,
bool danger_status)
{
struct dpu_kms *kms = (struct dpu_kms *)s->private;
- struct msm_drm_private *priv;
struct dpu_danger_safe_status status;
int i;
- if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ if (!kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
- priv = kms->dev->dev_private;
memset(&status, 0, sizeof(struct dpu_danger_safe_status));
pm_runtime_get_sync(&kms->pdev->dev);
@@ -153,13 +147,7 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
return 0;
dev = dpu_kms->dev;
- if (!dev)
- return 0;
-
priv = dev->dev_private;
- if (!priv)
- return 0;
-
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
@@ -280,7 +268,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct dpu_kms *dpu_kms;
- struct msm_drm_private *priv;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -292,10 +279,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- if (!dev || !dev->dev_private)
- return;
- priv = dev->dev_private;
-
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
drm_for_each_encoder_mask(encoder, crtc->dev,
@@ -333,7 +316,6 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder)
if (funcs && funcs->commit)
funcs->commit(encoder);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
drm_for_each_crtc(crtc, dev) {
if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
continue;
@@ -464,16 +446,6 @@ static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
- DPU_ERROR("invalid dev\n");
- return;
- } else if (!dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid dev_private\n");
- return;
- }
priv = dpu_kms->dev->dev_private;
for (i = 0; i < priv->num_crtcs; i++)
@@ -505,7 +477,6 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
-
dev = dpu_kms->dev;
priv = dev->dev_private;
catalog = dpu_kms->catalog;
@@ -585,8 +556,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
int i;
dev = dpu_kms->dev;
- if (!dev)
- return;
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
@@ -725,8 +694,7 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
mmu = dpu_kms->base.aspace->mmu;
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ mmu->funcs->detach(mmu);
msm_gem_address_space_put(dpu_kms->base.aspace);
dpu_kms->base.aspace = NULL;
@@ -752,8 +720,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return PTR_ERR(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
DPU_ERROR("failed to attach iommu %d\n", ret);
msm_gem_address_space_put(aspace);
@@ -803,16 +770,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- if (!dev) {
- DPU_ERROR("invalid device\n");
- return rc;
- }
-
priv = dev->dev_private;
- if (!priv) {
- DPU_ERROR("invalid private data\n");
- return rc;
- }
atomic_set(&dpu_kms->bandwidth_ref, 0);
@@ -974,7 +932,7 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev)
struct dpu_kms *dpu_kms;
int irq;
- if (!dev || !dev->dev_private) {
+ if (!dev) {
DPU_ERROR("drm device node invalid\n");
return ERR_PTR(-EINVAL);
}
@@ -1064,11 +1022,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
- if (!ddev) {
- DPU_ERROR("invalid drm_device\n");
- return rc;
- }
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
@@ -1086,11 +1039,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
- if (!ddev) {
- DPU_ERROR("invalid drm_device\n");
- return rc;
- }
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 959d03e007fa..c6169e7df19d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -139,10 +139,6 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
-/* get struct msm_kms * from drm_device * */
-#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
- ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
-
/**
* Debugfs functions - extra helper functions for debugfs support
*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 8d24b79fd400..991f4c8f8a12 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -154,10 +154,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
u32 ot_lim;
int ret, i;
- if (!dpu_kms) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
@@ -214,7 +210,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
- if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ if (!params || !dpu_kms->hw_mdp) {
DPU_ERROR("invalid arguments\n");
return;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 50711ccc8691..dda05436f716 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -157,10 +157,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
}
}
-static const char * const iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
@@ -172,8 +168,7 @@ static void mdp4_destroy(struct msm_kms *kms)
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
@@ -524,8 +519,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index f6e71ff539ca..1f48f64539a2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -14,7 +14,7 @@ struct mdp5_cfg_handler {
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
-const struct mdp5_cfg_hw msm8x74v1_config = {
+static const struct mdp5_cfg_hw msm8x74v1_config = {
.name = "msm8x74v1",
.mdp = {
.count = 1,
@@ -98,7 +98,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.max_clk = 200000000,
};
-const struct mdp5_cfg_hw msm8x74v2_config = {
+static const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
@@ -180,7 +180,7 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.max_clk = 200000000,
};
-const struct mdp5_cfg_hw apq8084_config = {
+static const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.mdp = {
.count = 1,
@@ -275,7 +275,7 @@ const struct mdp5_cfg_hw apq8084_config = {
.max_clk = 320000000,
};
-const struct mdp5_cfg_hw msm8x16_config = {
+static const struct mdp5_cfg_hw msm8x16_config = {
.name = "msm8x16",
.mdp = {
.count = 1,
@@ -342,7 +342,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
-const struct mdp5_cfg_hw msm8x94_config = {
+static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
.count = 1,
@@ -437,7 +437,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
.max_clk = 400000000,
};
-const struct mdp5_cfg_hw msm8x96_config = {
+static const struct mdp5_cfg_hw msm8x96_config = {
.name = "msm8x96",
.mdp = {
.count = 1,
@@ -545,7 +545,104 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
-const struct mdp5_cfg_hw msm8917_config = {
+static const struct mdp5_cfg_hw msm8x76_config = {
+ .name = "msm8x76",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_DSC |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .smp = {
+ .mmb_count = 10,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
+ [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .pipe_vig = {
+ .count = 2,
+ .base = { 0x04000, 0x06000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x440DC },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x70800, 0x72000 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 360000000,
+};
+
+static const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
.count = 1,
@@ -630,7 +727,7 @@ const struct mdp5_cfg_hw msm8917_config = {
.max_clk = 320000000,
};
-const struct mdp5_cfg_hw msm8998_config = {
+static const struct mdp5_cfg_hw msm8998_config = {
.name = "msm8998",
.mdp = {
.count = 1,
@@ -745,6 +842,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
};
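Reviewer note: the new msm8x76 entry is keyed on MDP5 minor revision 11 in cfg_handlers_v1[]. A minimal sketch of the lookup this table serves, with illustrative names (the driver reads the revision from the MDP version register at probe time):

	/* Sketch: map a probed hardware revision to its config. */
	static const struct mdp5_cfg_hw *
	example_lookup_cfg(const struct mdp5_cfg_handler *tbl, int n, int rev)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].revision == rev)
				return tbl[i].config.hw;

		return NULL;	/* unknown revision: let probe fail loudly */
	}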
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index eb0b4b7dc7cc..05cc04f729d6 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -214,7 +214,6 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
- const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
@@ -232,8 +231,6 @@ static void blend_setup(struct drm_crtc *crtc)
u32 val;
#define blender(stage) ((stage) - STAGE0)
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* ctl could be released already when we are shutting down: */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 91cd76a2bab1..e43ecd4be10a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -19,10 +19,6 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -233,8 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
}
@@ -314,6 +309,10 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
mdp5_kms->enable_count--;
WARN_ON(mdp5_kms->enable_count < 0);
+ if (mdp5_kms->tbu_rt_clk)
+ clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+ if (mdp5_kms->tbu_clk)
+ clk_disable_unprepare(mdp5_kms->tbu_clk);
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
@@ -334,6 +333,10 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->core_clk);
if (mdp5_kms->lut_clk)
clk_prepare_enable(mdp5_kms->lut_clk);
+ if (mdp5_kms->tbu_clk)
+ clk_prepare_enable(mdp5_kms->tbu_clk);
+ if (mdp5_kms->tbu_rt_clk)
+ clk_prepare_enable(mdp5_kms->tbu_rt_clk);
return 0;
}
@@ -466,14 +469,11 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct mdp5_cfg_hw *hw_cfg;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
/*
* Construct encoders and modeset initialize connector devices
* for each external display interface.
@@ -737,8 +737,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
@@ -974,6 +973,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
+ get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
+ get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
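Reviewer note: tbu/tbu_rt follow the existing optional-clock pattern: get_clk(..., false) leaves the pointer NULL when the DT does not provide the clock, and every enable/disable is guarded on NULL, so SoCs without a TBU keep working unchanged (the common clk API itself treats a NULL clk as a no-op, so the guard doubles as documentation). The pattern in isolation:

	/* Sketch: a NULL struct clk * means "not present on this SoC". */
	static int example_enable_optional(struct clk *clk)
	{
		if (!clk)
			return 0;	/* absent: nothing to do */

		return clk_prepare_enable(clk);
	}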
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index d1bf4fdfc815..128866742593 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -53,6 +53,8 @@ struct mdp5_kms {
struct clk *ahb_clk;
struct clk *core_clk;
struct clk *lut_clk;
+ struct clk *tbu_clk;
+ struct clk *tbu_rt_clk;
struct clk *vsync_clk;
/*
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index b31cfb554fa2..d7fa2c49e741 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -121,7 +121,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
- u32 fmt = format->base.pixel_format;
uint32_t blkcfg = 0;
nplanes = info->num_planes;
@@ -135,7 +134,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
* them together, writes to SMP using a single client.
*/
if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
- fmt = DRM_FORMAT_NV24;
nplanes = 2;
/* if decimation is enabled, HW decimates less on the
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index b7b7c1a9164a..86ad3fdf207d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -66,6 +66,26 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
.num_dsi = 1,
};
+static const char * const dsi_8976_bus_clk_names[] = {
+ "mdp_core", "iface", "bus",
+};
+
+static const struct msm_dsi_config msm8976_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 3,
+ .regs = {
+ {"gdsc", -1, -1},
+ {"vdda", 100000, 100}, /* 1.2 V */
+ {"vddio", 100000, 100}, /* 1.8 V */
+ },
+ },
+ .bus_clk_names = dsi_8976_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
+ .io_start = { 0x1a94000, 0x1a96000 },
+ .num_dsi = 2,
+};
+
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
@@ -147,7 +167,7 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
-const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
@@ -158,7 +178,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_v2,
};
-const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
@@ -169,7 +189,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
-const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
@@ -197,6 +217,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
+ &msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
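Reviewer note: supporting a new DSI host variant takes three coordinated pieces, all in this series: a msm_dsi_config (regulators, bus clocks, IO addresses), a version constant (the dsi_cfg.h hunk below adds V1_4_2), and a dsi_cfg_handlers[] entry keyed on (major, minor). A sketch of the lookup that table feeds, assuming field names major/minor as suggested by the initializers above:

	/* Sketch: resolve the config for the version read from hardware. */
	static const struct msm_dsi_cfg_handler *
	example_get_handler(const struct msm_dsi_cfg_handler *tbl, int n,
			    u32 major, u32 minor)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].major == major && tbl[i].minor == minor)
				return &tbl[i];

		return NULL;
	}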
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index e2b7a7dfbe49..50a37ceb6a25 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -17,6 +17,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 1e7b1be25bb0..458cec82ae13 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1293,14 +1293,13 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
u8 *buf, int rx_byte, int pkt_size)
{
- u32 *lp, *temp, data;
+ u32 *temp, data;
int i, j = 0, cnt;
u32 read_cnt;
u8 reg[16];
int repeated_bytes = 0;
int buf_offset = buf - msm_host->rx_buf;
- lp = (u32 *)buf;
temp = (u32 *)reg;
cnt = (rx_byte + 3) >> 2;
if (cnt > 4)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 3522863a4984..b0cfa67d2a57 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -145,7 +145,7 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
- s32 ui, ui_x8, lpx;
+ s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
@@ -175,7 +175,6 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
- lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
tmin = max_t(s32, temp, 0);
@@ -262,7 +261,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
- s32 ui, ui_x8, lpx;
+ s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
@@ -284,7 +283,6 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
- lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
tmin = max_t(s32, temp, 0);
@@ -485,6 +483,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
.data = &dsi_phy_28nm_hpm_cfgs },
+ { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
+ .data = &dsi_phy_28nm_hpm_famb_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-lp",
.data = &dsi_phy_28nm_lp_cfgs },
#endif
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c4069ce6afe6..24b294ed3059 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -40,6 +40,7 @@ struct msm_dsi_phy_cfg {
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index b3f678f6c2aa..c3c580cfd8b1 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -39,15 +39,10 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
- if (!enable) {
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
- return;
- }
-
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
@@ -56,6 +51,39 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+}
+
+static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+
+ if (phy->cfg->type == MSM_DSI_PHY_28NM_LP)
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
+ else
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!enable) {
+ dsi_phy_write(phy->reg_base +
+ REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode)
+ dsi_28nm_phy_regulator_enable_ldo(phy);
+ else
+ dsi_28nm_phy_regulator_enable_dcdc(phy);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
@@ -77,8 +105,6 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
dsi_28nm_phy_regulator_ctrl(phy, true);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
dsi_28nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
@@ -142,6 +168,24 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.num_dsi_phy = 2,
};
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
+ .type = MSM_DSI_PHY_28NM_HPM,
+ .src_pll_truthtable = { {true, true}, {false, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
+ },
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.type = MSM_DSI_PHY_28NM_LP,
.src_pll_truthtable = { {true, true}, {true, true} },
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 1697e61f9c2f..8a38d4b95102 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -29,8 +29,12 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
- cfg->reg_names[i], ret);
+ if (ret != -EPROBE_DEFER) {
+ DRM_DEV_ERROR(dev,
+ "failed to get phy regulator: %s (%d)\n",
+ cfg->reg_names[i], ret);
+ }
+
return ret;
}
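Reviewer note: suppressing the error print for -EPROBE_DEFER is the standard idiom; deferred probe is an expected, retried condition, and logging it as an error pollutes dmesg on every retry (kernels newer than this series can use dev_err_probe() for the same effect). The idiom in isolation, with illustrative names:

	static int example_get_reg(struct device *dev, const char *name,
				   struct regulator **out)
	{
		struct regulator *reg = devm_regulator_get(dev, name);

		if (IS_ERR(reg)) {
			int ret = PTR_ERR(reg);

			/* Deferred probe is retried; only real failures log. */
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "failed to get regulator %s (%d)\n",
					name, ret);
			return ret;
		}

		*out = reg;
		return 0;
	}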
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index a052364a5d74..18f3a5c53ffb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -16,6 +16,7 @@
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>
+#include <linux/sched/task.h>
/*
* Power Management:
@@ -838,7 +839,7 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
return ERR_CAST(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
msm_gem_address_space_put(aspace);
return ERR_PTR(ret);
@@ -995,8 +996,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
- NULL, 0);
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
msm_gem_address_space_put(gpu->aspace);
}
}
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 34f643a0c28a..34980d8eb7ad 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -21,14 +21,12 @@ struct msm_gpummu {
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static int msm_gpummu_attach(struct msm_mmu *mmu)
{
return 0;
}
-static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 8c95c31e2b12..ad58cfe5998e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -23,16 +23,14 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static int msm_iommu_attach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 871d56303697..67a623f14319 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -10,8 +10,8 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
+ int (*attach)(struct msm_mmu *mmu);
+ void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index c7832a951039..af7ceb246c7c 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -298,7 +298,7 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
- uint64_t iova, uint32_t size)
+ uint64_t iova, uint32_t size, bool full)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
unsigned offset = 0;
@@ -318,6 +318,9 @@ static void snapshot_buf(struct msm_rd_state *rd,
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
+ if (!full)
+ return;
+
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
@@ -381,18 +384,21 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; i < submit->nr_bos; i++)
- if (should_dump(submit, i))
- snapshot_buf(rd, submit, i, 0, 0);
+ snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
for (i = 0; i < submit->nr_cmds; i++) {
- uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
- submit->cmd[i].iova, szd * 4);
+ submit->cmd[i].iova, szd * 4, true);
}
+ }
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint64_t iova = submit->cmd[i].iova;
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
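Reviewer note: snapshot_buf() now always emits the RD_GPUADDR record, so the decoder learns about every buffer, and the new full flag controls whether the contents follow; the cmdstream walk is then split into two passes so every cmd buffer is snapshotted before any command sections are written. Condensed shape of the flag, as a sketch:

	/* Sketch: metadata always, contents only on request. */
	static void example_snapshot_buf(struct msm_rd_state *rd,
					 uint64_t iova, uint32_t size,
					 bool full)
	{
		rd_write_section(rd, RD_GPUADDR,
				 (uint32_t[3]){ iova, size, iova >> 32 }, 12);

		if (!full)
			return;		/* address/size record only */

		/* ... dump contents for READ-marked buffers here ... */
	}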
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index e518d93ca6df..d08ae95ecc0a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -843,9 +843,13 @@ fail:
*/
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
+ struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
+ if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm)
+ return;
+
if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
ret = tiler_unpin(omap_obj->block);
if (ret) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index acabeaf28732..40a7e702c2a9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9500,7 +9500,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -9542,12 +9541,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -9557,14 +9551,17 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9582,15 +9579,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -9603,26 +9608,45 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -9636,15 +9660,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
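Reviewer note: pcie_capability_read_word()/_write_word() locate the PCIe capability offset internally and fail cleanly when it is absent, so the cached bridge_pos/gpu_pos bookkeeping can go, here and in the matching si.c hunk below; pci_is_pcie() stays as the cheap up-front guard. The idiom in isolation:

	/* Sketch: no manual capability-offset arithmetic needed. */
	static void example_set_hawd(struct pci_dev *pdev)
	{
		u16 lnkctl;

		if (!pci_is_pcie(pdev))
			return;

		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_HAWD;
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
	}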
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 7089dfc8c2a9..110fb38004b1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1826,8 +1826,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
track->textures[i].tex_coord_type = 2;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 840401413c58..f5f2ffea5ab2 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
track->textures[i].lookup_disable = true;
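Reviewer note: unlike most of this pull, the r100/r200 hunks above are behavioral fixes. RADEON_TXFORMAT_WIDTH_MASK and friends are defined in register layout (already shifted into place), so shifting idx_value first and masking afterwards selected the wrong bits; with an in-place mask the field must be isolated before it is aligned:

	/* Sketch: extracting a field whose MASK is anchored at SHIFT. */
	#define FIELD_SHIFT	8
	#define FIELD_MASK	(0xf << FIELD_SHIFT)

	static inline u32 example_get_field(u32 reg)
	{
		/*
		 * Correct: isolate with the in-place mask, then shift
		 * down. The old form, (reg >> FIELD_SHIFT) & FIELD_MASK,
		 * applies a mask still anchored at bit 8 to a value that
		 * has already been shifted to bit 0.
		 */
		return (reg & FIELD_MASK) >> FIELD_SHIFT;
	}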
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1d8efb0eefdb..d7eea75b2c27 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
+ }
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
@@ -7087,7 +7087,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -7129,12 +7128,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -7144,14 +7138,17 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7169,15 +7166,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -7190,26 +7195,46 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -7223,15 +7248,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 5b1f9ff97576..714af052fbef 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -837,16 +837,15 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
static void tegra_cursor_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0);
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
- struct drm_plane_state *state = plane->state;
u32 value = CURSOR_CLIP_DISPLAY;
/* rien ne va plus ("no more bets"): bail out if there is no CRTC or fb */
if (!plane->state->crtc || !plane->state->fb)
return;
- switch (state->crtc_w) {
+ switch (plane->state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
break;
@@ -864,16 +863,16 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
break;
default:
- WARN(1, "cursor size %ux%u not supported\n", state->crtc_w,
- state->crtc_h);
+ WARN(1, "cursor size %ux%u not supported\n",
+ plane->state->crtc_w, plane->state->crtc_h);
return;
}
- value |= (bo->iova >> 10) & 0x3fffff;
+ value |= (state->iova[0] >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (bo->iova >> 32) & 0x3;
+ value = (state->iova[0] >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -892,7 +891,8 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
/* position the cursor */
- value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
+ value = (plane->state->crtc_y & 0x3fff) << 16 |
+ (plane->state->crtc_x & 0x3fff);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
@@ -2017,7 +2017,7 @@ static int tegra_dc_init(struct host1x_client *client)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
err = host1x_client_iommu_attach(client);
- if (err < 0) {
+ if (err < 0 && err != -ENODEV) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
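Reviewer note: host1x_client_iommu_attach() now returns -ENODEV without a log when the client simply has no IOMMU group (see the drm.c hunk below), and callers such as tegra_dc_init() treat that value as "run without an IOMMU" rather than a probe failure. The caller-side contract, as a sketch:

	static int example_init(struct host1x_client *client)
	{
		int err = host1x_client_iommu_attach(client);

		/* -ENODEV means no IOMMU group: run with direct DMA. */
		if (err < 0 && err != -ENODEV)
			return err;

		return 0;
	}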
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 56e5e7a5c108..f455ce71e85d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -920,10 +920,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
if (tegra->domain) {
group = iommu_group_get(client->dev);
- if (!group) {
- dev_err(client->dev, "failed to get IOMMU group\n");
+ if (!group)
return -ENODEV;
- }
if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
@@ -1243,6 +1241,9 @@ static int host1x_drm_remove(struct host1x_device *dev)
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+
err = host1x_device_exit(dev);
if (err < 0)
dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 746dae32c484..bc15b430156d 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -27,6 +27,29 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
+/* XXX move this into lib/scatterlist.c? */
+static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
+ unsigned int nents, gfp_t gfp_mask)
+{
+ struct scatterlist *dst;
+ unsigned int i;
+ int err;
+
+ err = sg_alloc_table(sgt, nents, gfp_mask);
+ if (err < 0)
+ return err;
+
+ dst = sgt->sgl;
+
+ for (i = 0; i < nents; i++) {
+ sg_set_page(dst, sg_page(sg), sg->length, 0);
+ dst = sg_next(dst);
+ sg = sg_next(sg);
+ }
+
+ return 0;
+}
+
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
dma_addr_t *phys)
{
@@ -52,11 +75,31 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
return ERR_PTR(-ENOMEM);
if (obj->pages) {
+ /*
+ * If the buffer object was allocated from the explicit IOMMU
+ * API code paths, construct an SG table from the pages.
+ */
err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
0, obj->gem.size, GFP_KERNEL);
if (err < 0)
goto free;
+ } else if (obj->sgt) {
+ /*
+ * If the buffer object already has an SG table but no pages
+ * were allocated for it, it means the buffer was imported and
+ * the SG table needs to be copied so that mapping it does not
+ * overwrite the DMA addresses used by other users of the
+ * original SG table.
+ */
+ err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
+ GFP_KERNEL);
+ if (err < 0)
+ goto free;
} else {
+ /*
+ * If the buffer object has no pages allocated and was not
+ * imported, it must have been allocated with the DMA API, so
+ * the DMA API helper can be used.
+ */
err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
obj->gem.size);
if (err < 0)
@@ -397,13 +440,6 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0)
goto detach;
- } else {
- if (bo->sgt->nents > 1) {
- err = -EINVAL;
- goto detach;
- }
-
- bo->iova = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
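The sg_alloc_table_from_sg() helper above duplicates the entry layout of an existing SG table so that an imported buffer can be mapped without disturbing the exporter's copy. A sketch of how such a clone might be used; only the helper comes from the hunk, the wrapper function, names and DMA direction are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct sg_table *example_clone_and_map(struct device *dev,
                                                  struct sg_table *orig)
    {
            struct sg_table *sgt;
            int err;

            sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
            if (!sgt)
                    return ERR_PTR(-ENOMEM);

            err = sg_alloc_table_from_sg(sgt, orig->sgl, orig->nents,
                                         GFP_KERNEL);
            if (err < 0)
                    goto free;

            /* map the private copy; orig keeps its own DMA addresses */
            if (dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
                    err = -ENOMEM;
                    goto free_table;
            }

            return sgt;

    free_table:
            sg_free_table(sgt);
    free:
            kfree(sgt);
            return ERR_PTR(err);
    }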
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 2b4082d0bc9e..47d985ac7cd7 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -605,11 +605,8 @@ static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
struct drm_atomic_state *state)
{
- struct drm_device *drm = dev_get_drvdata(hub->client.parent);
struct drm_private_state *priv;
- WARN_ON(!drm_modeset_is_locked(&drm->mode_config.connection_mutex));
-
priv = drm_atomic_get_private_obj_state(state, &hub->base);
if (IS_ERR(priv))
return ERR_CAST(priv);
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 163b590be224..cadcdd9ea427 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -129,6 +129,17 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
goto unpin;
}
+ /*
+ * The display controller needs contiguous memory, so
+ * bail out if the buffer is discontiguous and its SG
+ * table could not be mapped to a single contiguous
+ * chunk of I/O virtual memory.
+ */
+ if (err > 1) {
+ err = -EINVAL;
+ goto unpin;
+ }
+
state->iova[i] = sg_dma_address(sgt->sgl);
state->sgt[i] = sgt;
} else {
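The new check above leans on dma_map_sg() returning the number of coalesced DMA segments: a result greater than one means the buffer could not be folded into a single IOVA range, which scanout hardware cannot consume. The same check in stand-alone form, with illustrative names and direction:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int example_map_contiguous(struct device *dev, struct sg_table *sgt,
                                      dma_addr_t *iova)
    {
            int count;

            count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
            if (count == 0)
                    return -ENOMEM;

            if (count > 1) {
                    /* scanout needs one contiguous chunk of IOVA space */
                    dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
                    return -EINVAL;
            }

            *iova = sg_dma_address(sgt->sgl);
            return 0;
    }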
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 615cb319fa8b..a68d3b36b972 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3912,8 +3912,7 @@ static int tegra_sor_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_sor_suspend(struct device *dev)
+static int tegra_sor_runtime_suspend(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
@@ -3935,7 +3934,7 @@ static int tegra_sor_suspend(struct device *dev)
return 0;
}
-static int tegra_sor_resume(struct device *dev)
+static int tegra_sor_runtime_resume(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
@@ -3967,10 +3966,39 @@ static int tegra_sor_resume(struct device *dev)
return 0;
}
-#endif
+
+static int tegra_sor_suspend(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ if (sor->hdmi_supply) {
+ err = regulator_disable(sor->hdmi_supply);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_sor_resume(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ if (sor->hdmi_supply) {
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
static const struct dev_pm_ops tegra_sor_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+ SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
};
struct platform_driver tegra_sor_driver = {
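The SOR hunks above split the former CONFIG_PM-guarded callbacks in two: the existing clock and rail handling becomes the runtime PM pair, and the new system sleep pair only toggles the HDMI supply across suspend. The resulting shape of the PM ops, sketched with illustrative names and empty callback bodies:

    #include <linux/pm.h>

    static int example_runtime_suspend(struct device *dev)
    {
            /* power down the block itself between uses */
            return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
            return 0;
    }

    static int example_suspend(struct device *dev)
    {
            /* system sleep: release resources that span suspend */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(example_runtime_suspend,
                               example_runtime_resume, NULL)
            SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };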
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 9444ba183990..3526c2892ddb 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -167,7 +167,7 @@ static int vic_init(struct host1x_client *client)
int err;
err = host1x_client_iommu_attach(client);
- if (err < 0) {
+ if (err < 0 && err != -ENODEV) {
dev_err(vic->dev, "failed to attach to domain: %d\n", err);
return err;
}
@@ -386,13 +386,14 @@ static const struct vic_config vic_t194_config = {
.supports_sid = true,
};
-static const struct of_device_id vic_match[] = {
+static const struct of_device_id tegra_vic_of_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_vic_of_match);
static int vic_probe(struct platform_device *pdev)
{
@@ -516,7 +517,7 @@ static const struct dev_pm_ops vic_pm_ops = {
struct platform_driver tegra_vic_driver = {
.driver = {
.name = "tegra-vic",
- .of_match_table = vic_match,
+ .of_match_table = tegra_vic_of_match,
.pm = &vic_pm_ops
},
.probe = vic_probe,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 347b08b56042..75fd2a7b0842 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1291,8 +1291,8 @@ static void sklh_idle_state_table_update(void)
return;
}
- skl_cstates[5].disabled = 1; /* C8-SKL */
- skl_cstates[6].disabled = 1; /* C9-SKL */
+ skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
+ skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
/*
* intel_idle_state_table_update()
@@ -1355,7 +1355,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
continue;
/* if state marked as disabled, skip it */
- if (cpuidle_state_table[cstate].disabled != 0) {
+ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
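The intel_idle hunks above fold the per-state "disabled" field into the flags word: quirk code ORs in CPUIDLE_FLAG_UNUSABLE and the driver-init loop tests the same bit. Sketched below; the flag name comes from the hunks, the table contents are illustrative:

    #include <linux/cpuidle.h>

    static struct cpuidle_state example_cstates[] = {
            { .name = "C1" },
            { .name = "C8" },
    };

    static void example_quirk(void)
    {
            /* mark C8 unusable instead of writing a separate field */
            example_cstates[1].flags |= CPUIDLE_FLAG_UNUSABLE;
    }

    static bool example_state_usable(const struct cpuidle_state *state)
    {
            return !(state->flags & CPUIDLE_FLAG_UNUSABLE);
    }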
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index fcc3f999e482..65f85faf6f31 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -163,16 +163,10 @@ static const struct iio_chan_spec cros_ec_accel_legacy_channels[] = {
static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_sensors_core_state *state;
int ret;
- if (!ec || !ec->ec_dev) {
- dev_warn(&pdev->dev, "No EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
index cdbb29cfb907..fefad9572790 100644
--- a/drivers/iio/common/cros_ec_sensors/Kconfig
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -4,7 +4,7 @@
#
config IIO_CROS_EC_SENSORS_CORE
tristate "ChromeOS EC Sensors Core"
- depends on SYSFS && CROS_EC
+ depends on SYSFS && CROS_EC_SENSORHUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index a6987726eeb8..7dce04473467 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -222,17 +222,11 @@ static const struct iio_info ec_sensors_info = {
static int cros_ec_sensors_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_sensors_state *state;
struct iio_chan_spec *channel;
int ret, i;
- if (!ec_dev || !ec_dev->ec_dev) {
- dev_warn(&pdev->dev, "No CROS EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index d2609e6feda4..81a7f692de2f 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
static char *cros_ec_loc[] = {
@@ -88,7 +89,8 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
- struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
+ struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
u32 ver_mask;
int ret, i;
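The probe-path hunks above reflect a hierarchy change: the sensor platform devices now hang off cros-ec-sensorhub, so the EC handle is reached through the hub's driver data and the per-driver NULL checks become redundant. A sketch of the new lookup, using the structures named in the hunks:

    #include <linux/platform_data/cros_ec_sensorhub.h>
    #include <linux/platform_device.h>

    static struct cros_ec_dev *example_get_ec(struct platform_device *pdev)
    {
            /* the parent is the cros-ec-sensorhub device */
            struct cros_ec_sensorhub *hub = dev_get_drvdata(pdev->dev.parent);

            return hub->ec;
    }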
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index c5263b563fc1..d85a391e50c5 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -169,17 +169,11 @@ static const struct iio_info cros_ec_light_prox_info = {
static int cros_ec_light_prox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_light_prox_state *state;
struct iio_chan_spec *channel;
int ret;
- if (!ec_dev || !ec_dev->ec_dev) {
- dev_warn(dev, "No CROS EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 61f4eb63eec1..4706ff09f0e8 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -450,7 +450,7 @@ config KEYBOARD_SNVS_PWRKEY
depends on OF
help
This is the snvs powerkey driver for the Freescale i.MX application
- processors that are newer than i.MX6 SX.
+ processors.
To compile this driver as a module, choose M here; the
module will be called snvs_pwrkey.
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 8d4d9786cc74..2b71c5a51f90 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -226,8 +226,6 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
{
struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb,
notifier);
- uint8_t mkbp_event_type = ckdev->ec->event_data.event_type &
- EC_MKBP_EVENT_TYPE_MASK;
u32 val;
unsigned int ev_type;
@@ -239,7 +237,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
if (queued_during_suspend && !device_may_wakeup(ckdev->dev))
return NOTIFY_OK;
- switch (mkbp_event_type) {
+ switch (ckdev->ec->event_data.event_type) {
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);
@@ -266,7 +264,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_SWITCH:
pm_wakeup_event(ckdev->dev, 0);
- if (mkbp_event_type == EC_MKBP_EVENT_BUTTON) {
+ if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
val = get_unaligned_le32(
&ckdev->ec->event_data.data.buttons);
ev_type = EV_KEY;
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index e76b7a400a1c..2f5e3ab5ed63 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -19,15 +19,16 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#define SNVS_LPSR_REG 0x4C /* LP Status Register */
-#define SNVS_LPCR_REG 0x38 /* LP Control Register */
-#define SNVS_HPSR_REG 0x14
-#define SNVS_HPSR_BTN BIT(6)
-#define SNVS_LPSR_SPO BIT(18)
-#define SNVS_LPCR_DEP_EN BIT(5)
+#define SNVS_HPVIDR1_REG 0xF8
+#define SNVS_LPSR_REG 0x4C /* LP Status Register */
+#define SNVS_LPCR_REG 0x38 /* LP Control Register */
+#define SNVS_HPSR_REG 0x14
+#define SNVS_HPSR_BTN BIT(6)
+#define SNVS_LPSR_SPO BIT(18)
+#define SNVS_LPCR_DEP_EN BIT(5)
-#define DEBOUNCE_TIME 30
-#define REPEAT_INTERVAL 60
+#define DEBOUNCE_TIME 30
+#define REPEAT_INTERVAL 60
struct pwrkey_drv_data {
struct regmap *snvs;
@@ -37,6 +38,7 @@ struct pwrkey_drv_data {
int wakeup;
struct timer_list check_timer;
struct input_dev *input;
+ u8 minor_rev;
};
static void imx_imx_snvs_check_for_events(struct timer_list *t)
@@ -67,13 +69,29 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+ struct input_dev *input = pdata->input;
u32 lp_status;
- pm_wakeup_event(pdata->input->dev.parent, 0);
+ pm_wakeup_event(input->dev.parent, 0);
regmap_read(pdata->snvs, SNVS_LPSR_REG, &lp_status);
- if (lp_status & SNVS_LPSR_SPO)
- mod_timer(&pdata->check_timer, jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
+ if (lp_status & SNVS_LPSR_SPO) {
+ if (pdata->minor_rev == 0) {
+ /*
+ * The first-generation i.MX6 SoCs only send an
+ * interrupt on button release. To mimic power-key
+ * usage, we'll prepend a press event.
+ */
+ input_report_key(input, pdata->keycode, 1);
+ input_sync(input);
+ input_report_key(input, pdata->keycode, 0);
+ input_sync(input);
+ pm_relax(input->dev.parent);
+ } else {
+ mod_timer(&pdata->check_timer,
+ jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
+ }
+ }
/* clear SPO status */
regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
@@ -90,10 +108,11 @@ static void imx_snvs_pwrkey_act(void *pdata)
static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
{
- struct pwrkey_drv_data *pdata = NULL;
- struct input_dev *input = NULL;
+ struct pwrkey_drv_data *pdata;
+ struct input_dev *input;
struct device_node *np;
int error;
+ u32 vid;
/* Get SNVS register Page */
np = pdev->dev.of_node;
@@ -121,6 +140,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return -EINVAL;
+ regmap_read(pdata->snvs, SNVS_HPVIDR1_REG, &vid);
+ pdata->minor_rev = vid & 0xff;
+
regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_DEP_EN, SNVS_LPCR_DEP_EN);
/* clear the unexpected interrupt before driver ready */
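The snvs_pwrkey changes above read the SNVS revision from HPVIDR1 at probe time; on minor revision 0 parts (first-generation i.MX6) the hardware only interrupts on button release, so the handler synthesizes a press/release pair. Both pieces sketched below; the register offset and mask come from the hunks, the rest is illustrative:

    #include <linux/input.h>
    #include <linux/regmap.h>

    #define EXAMPLE_HPVIDR1_REG 0xF8    /* SNVS_HPVIDR1_REG in the hunk */

    static u8 example_snvs_minor_rev(struct regmap *snvs)
    {
            u32 vid = 0;

            regmap_read(snvs, EXAMPLE_HPVIDR1_REG, &vid);
            return vid & 0xff;          /* low byte is the minor revision */
    }

    static void example_emulate_press(struct input_dev *input, int keycode)
    {
            input_report_key(input, keycode, 1);
            input_sync(input);
            input_report_key(input, keycode, 0);
            input_sync(input);
    }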
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 84051f20b18a..fd253781be71 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -695,7 +695,7 @@ static __poll_t uinput_poll(struct file *file, poll_table *wait)
if (udev->head != udev->tail)
return EPOLLIN | EPOLLRDNORM;
- return 0;
+ return EPOLLOUT | EPOLLWRNORM;
}
static int uinput_release(struct inode *inode, struct file *file)
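The one-line uinput fix above corrects the poll contract: a uinput descriptor is always writable, so the handler must report EPOLLOUT | EPOLLWRNORM even when no events are queued, rather than returning 0. Sketched with an illustrative readiness flag:

    #include <linux/poll.h>

    static __poll_t example_poll(struct file *file, struct wait_queue_head *wq,
                                 poll_table *wait, bool have_events)
    {
            __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* always writable */

            poll_wait(file, wq, wait);

            if (have_events)
                    mask |= EPOLLIN | EPOLLRDNORM;

            return mask;
    }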
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
index a4cabf52740c..74f7c6f214ff 100644
--- a/drivers/input/rmi4/rmi_f34v7.c
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -1189,6 +1189,9 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
{
int ret;
+ f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev,
+ f34->fn->irq_mask);
+
rmi_f34v7_read_queries_bl_version(f34);
f34->v7.image = fw->data;
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 2407ea43de59..b313c579914f 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -163,7 +163,6 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
- rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
@@ -215,7 +214,6 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
- rmiaddr += SMB_MAX_COUNT;
}
retval = 0;
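The two deletions above stop advancing rmiaddr between blocks: on this transport the RMI address is translated to an SMBus command code, and successive transfers at that command continue where the previous block left off, so only the host-side buffer pointer should move. The loop shape after the fix, sketched with an illustrative transfer callback (SMB_MAX_COUNT is the driver's per-transfer limit):

    static int example_chunked_write(u16 rmiaddr, const u8 *buf, size_t len,
                                     int (*xfer)(u16 addr, const u8 *buf,
                                                 size_t count))
    {
            while (len > 0) {
                    size_t chunk = min_t(size_t, len, SMB_MAX_COUNT);
                    int ret = xfer(rmiaddr, buf, chunk); /* rmiaddr is fixed */

                    if (ret < 0)
                            return ret;

                    buf += chunk;   /* only the buffer pointer advances */
                    len -= chunk;
            }

            return 0;
    }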
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index fb43aa708660..0403102e807e 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -129,6 +129,15 @@ static const unsigned long goodix_irq_flags[] = {
static const struct dmi_system_id rotated_screen[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
+ .ident = "Teclast X89",
+ .matches = {
+ /* tPAD is too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
+ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
+ },
+ },
+ {
.ident = "WinBook TW100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f1086eaed41c..0b9d78a0f3ac 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -3,6 +3,10 @@
config IOMMU_IOVA
tristate
+# The IOASID library may also be used by non-IOMMU_API users
+config IOASID
+ tristate
+
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
@@ -138,6 +142,7 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
+ select IOMMU_DMA
depends on X86_64 && PCI && ACPI
---help---
With this option you can enable support for AMD IOMMU hardware in
@@ -207,6 +212,7 @@ config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
select PCI_PASID
+ select PCI_PRI
select MMU_NOTIFIER
help
Shared Virtual Memory (SVM) provides a facility for devices
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4f405f926e73..97814cc861ea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 12e5039a7a25..bd25674ee4db 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
+#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
@@ -88,8 +89,6 @@ const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
-static const struct dma_map_ops amd_iommu_dma_ops;
-
/*
* general struct to manage commands send to an IOMMU
*/
@@ -102,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
-static void iova_domain_flush_tlb(struct iova_domain *iovad);
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
- /* generic protection domain information */
- struct protection_domain domain;
-
- /* IOVA RB-Tree */
- struct iova_domain iovad;
-};
-
-static struct iova_domain reserved_iova_ranges;
-static struct lock_class_key reserved_rbtree_key;
/****************************************************************************
*
@@ -167,12 +151,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
-{
- BUG_ON(domain->flags != PD_DMA_OPS_MASK);
- return container_of(domain, struct dma_ops_domain, domain);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -206,71 +184,61 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
return NULL;
}
-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- *(u16 *)data = alias;
- return 0;
-}
-
-static u16 get_alias(struct device *dev)
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid, ivrs_alias, pci_alias;
-
- /* The callers make sure that get_device_id() does not fail here */
- devid = get_device_id(dev);
+ u16 devid = pci_dev_id(pdev);
- /* For ACPI HID devices, we simply return the devid as such */
- if (!dev_is_pci(dev))
- return devid;
+ if (devid == alias)
+ return 0;
- ivrs_alias = amd_iommu_alias_table[devid];
+ amd_iommu_rlookup_table[alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[alias].data));
- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+ return 0;
+}
- if (ivrs_alias == pci_alias)
- return ivrs_alias;
+static void clone_aliases(struct pci_dev *pdev)
+{
+ if (!pdev)
+ return;
/*
- * DMA alias showdown
- *
- * The IVRS is fairly reliable in telling us about aliases, but it
- * can't know about every screwy device. If we don't have an IVRS
- * reported alias, use the PCI reported alias. In that case we may
- * still need to initialize the rlookup and dev_table entries if the
- * alias is to a non-existent device.
+ * The IVRS alias stored in the alias table may not be
+ * part of the PCI DMA aliases if its bus differs
+ * from that of the original device.
*/
- if (ivrs_alias == devid) {
- if (!amd_iommu_rlookup_table[pci_alias]) {
- amd_iommu_rlookup_table[pci_alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[pci_alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[pci_alias].data));
- }
+ clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
- return pci_alias;
- }
+ pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}
- pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d "
- "for device [%04x:%04x], kernel reported alias "
- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device,
- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
- PCI_FUNC(pci_alias));
+static struct pci_dev *setup_aliases(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 ivrs_alias;
+
+ /* For ACPI HID devices, there are no aliases */
+ if (!dev_is_pci(dev))
+ return NULL;
/*
- * If we don't have a PCI DMA alias and the IVRS alias is on the same
- * bus, then the IVRS table may know about a quirk that we don't.
+ * Add the IVRS alias to the pci aliases if it is on the same
+ * bus. The IVRS table may know about a quirk that we don't.
*/
- if (pci_alias == devid &&
+ ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
pci_add_dma_alias(pdev, ivrs_alias & 0xff);
pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
}
- return ivrs_alias;
+ clone_aliases(pdev);
+
+ return pdev;
}
static struct iommu_dev_data *find_dev_data(u16 devid)
@@ -408,7 +376,7 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
- dev_data->alias = get_alias(dev);
+ dev_data->pdev = setup_aliases(dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -433,20 +401,16 @@ static int iommu_init_device(struct device *dev)
static void iommu_ignore_device(struct device *dev)
{
- u16 alias;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
- alias = get_alias(dev);
-
+ amd_iommu_rlookup_table[devid] = NULL;
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
- amd_iommu_rlookup_table[devid] = NULL;
- amd_iommu_rlookup_table[alias] = NULL;
+ setup_aliases(dev);
}
static void iommu_uninit_device(struct device *dev)
@@ -620,8 +584,7 @@ retry:
pasid, address, flags);
break;
case EVENT_TYPE_INV_PPR_REQ:
- pasid = ((event[0] >> 16) & 0xFFFF)
- | ((event[1] << 6) & 0xF0000);
+ pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -856,17 +819,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
struct iommu_cmd *cmd)
{
u8 *target;
-
- target = iommu->cmd_buf + iommu->cmd_buf_tail;
-
- iommu->cmd_buf_tail += sizeof(*cmd);
- iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;
+ u32 tail;
/* Copy command to buffer */
+ tail = iommu->cmd_buf_tail;
+ target = iommu->cmd_buf + tail;
memcpy(target, cmd, sizeof(*cmd));
+ tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+ iommu->cmd_buf_tail = tail;
+
/* Tell the IOMMU about it */
- writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
@@ -1216,6 +1180,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
return iommu_queue_command(iommu, &cmd);
}
+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct amd_iommu *iommu = data;
+
+ return iommu_flush_dte(iommu, alias);
+}
+
/*
* Command send function for invalidating a device table entry
*/
@@ -1226,14 +1197,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
- ret = iommu_flush_dte(iommu, dev_data->devid);
- if (!ret && alias != dev_data->devid)
- ret = iommu_flush_dte(iommu, alias);
+ if (dev_data->pdev)
+ ret = pci_for_each_dma_alias(dev_data->pdev,
+ device_flush_dte_alias, iommu);
+ else
+ ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ ret = iommu_flush_dte(iommu, alias);
+ if (ret)
+ return ret;
+ }
+
if (dev_data->ats.enabled)
ret = device_flush_iotlb(dev_data, 0, ~0UL);
@@ -1282,12 +1261,6 @@ static void domain_flush_pages(struct protection_domain *domain,
__domain_flush_pages(domain, address, size, 0);
}
-/* Flush the whole IO/TLB for a given protection domain */
-static void domain_flush_tlb(struct protection_domain *domain)
-{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
@@ -1735,43 +1708,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
/****************************************************************************
*
- * The next functions belong to the address allocator for the dma_ops
- * interface functions.
- *
- ****************************************************************************/
-
-
-static unsigned long dma_ops_alloc_iova(struct device *dev,
- struct dma_ops_domain *dma_dom,
- unsigned int pages, u64 dma_mask)
-{
- unsigned long pfn = 0;
-
- pages = __roundup_pow_of_two(pages);
-
- if (dma_mask > DMA_BIT_MASK(32))
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(DMA_BIT_MASK(32)), false);
-
- if (!pfn)
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(dma_mask), true);
-
- return (pfn << PAGE_SHIFT);
-}
-
-static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
- unsigned long address,
- unsigned int pages)
-{
- pages = __roundup_pow_of_two(pages);
- address >>= PAGE_SHIFT;
-
- free_iova_fast(&dma_dom->iovad, address, pages);
-}
-
-/****************************************************************************
- *
* The next functions belong to the domain allocation. A domain is
* allocated for every IOMMU as the default domain. If device isolation
* is enabled, every device get its own domain. The most important thing
@@ -1846,42 +1782,23 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dom->domain.lock, flags);
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- spin_unlock_irqrestore(&dom->domain.lock, flags);
-}
-
-static void iova_domain_flush_tlb(struct iova_domain *iovad)
-{
- struct dma_ops_domain *dom;
-
- dom = container_of(iovad, struct dma_ops_domain, iovad);
-
- dma_ops_domain_flush_tlb(dom);
-}
-
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
*/
-static void dma_ops_domain_free(struct dma_ops_domain *dom)
+static void dma_ops_domain_free(struct protection_domain *domain)
{
- if (!dom)
+ if (!domain)
return;
- put_iova_domain(&dom->iovad);
+ iommu_put_dma_cookie(&domain->domain);
- free_pagetable(&dom->domain);
+ free_pagetable(domain);
- if (dom->domain.id)
- domain_id_free(dom->domain.id);
+ if (domain->id)
+ domain_id_free(domain->id);
- kfree(dom);
+ kfree(domain);
}
/*
@@ -1889,35 +1806,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
* It also initializes the page table and the address allocator data
* structures required for the dma_ops interface
*/
-static struct dma_ops_domain *dma_ops_domain_alloc(void)
+static struct protection_domain *dma_ops_domain_alloc(void)
{
- struct dma_ops_domain *dma_dom;
+ struct protection_domain *domain;
- dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
- if (!dma_dom)
+ domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
+ if (!domain)
return NULL;
- if (protection_domain_init(&dma_dom->domain))
- goto free_dma_dom;
-
- dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
- dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- dma_dom->domain.flags = PD_DMA_OPS_MASK;
- if (!dma_dom->domain.pt_root)
- goto free_dma_dom;
-
- init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
+ if (protection_domain_init(domain))
+ goto free_domain;
- if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
- goto free_dma_dom;
+ domain->mode = PAGE_MODE_3_LEVEL;
+ domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ domain->flags = PD_DMA_OPS_MASK;
+ if (!domain->pt_root)
+ goto free_domain;
- /* Initialize reserved ranges */
- copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
+ if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
- return dma_dom;
+ return domain;
-free_dma_dom:
- dma_ops_domain_free(dma_dom);
+free_domain:
+ dma_ops_domain_free(domain);
return NULL;
}
@@ -2015,11 +1927,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu;
- u16 alias;
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2032,8 +1942,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
- if (alias != dev_data->devid)
- set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
device_flush_dte(dev_data);
}
@@ -2042,17 +1951,14 @@ static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
clear_dte_entry(dev_data->devid);
- if (alias != dev_data->devid)
- clear_dte_entry(alias);
+ clone_aliases(dev_data->pdev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -2285,8 +2191,8 @@ static int amd_iommu_add_device(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
- else
- dev->dma_ops = &amd_iommu_dma_ops;
+ else if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
out:
iommu_completion_wait(iommu);
@@ -2320,43 +2226,32 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
return acpihid_device_group(dev);
}
+static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ return -ENODEV;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = !amd_iommu_unmap_flush;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+}
+
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
*
*****************************************************************************/
-/*
- * In the dma_ops path we only have the struct device. This function
- * finds the corresponding IOMMU, the protection domain and the
- * requestor id for a given device.
- * If the device is not yet associated with a domain this is also done
- * in this function.
- */
-static struct protection_domain *get_domain(struct device *dev)
-{
- struct protection_domain *domain;
- struct iommu_domain *io_domain;
-
- if (!check_device(dev))
- return ERR_PTR(-EINVAL);
-
- domain = get_dev_data(dev)->domain;
- if (domain == NULL && get_dev_data(dev)->defer_attach) {
- get_dev_data(dev)->defer_attach = false;
- io_domain = iommu_get_domain_for_dev(dev);
- domain = to_pdomain(io_domain);
- attach_device(dev, domain);
- }
- if (domain == NULL)
- return ERR_PTR(-EBUSY);
-
- if (!dma_ops_domain(domain))
- return ERR_PTR(-EBUSY);
-
- return domain;
-}
-
static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
@@ -2364,13 +2259,7 @@ static void update_device_table(struct protection_domain *domain)
list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
dev_data->iommu_v2);
-
- if (dev_data->devid == dev_data->alias)
- continue;
-
- /* There is an alias, update device table entry for it */
- set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
- dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
}
}
@@ -2382,458 +2271,6 @@ static void update_domain(struct protection_domain *domain)
domain_flush_tlb_pde(domain);
}
-static int dir2prot(enum dma_data_direction direction)
-{
- if (direction == DMA_TO_DEVICE)
- return IOMMU_PROT_IR;
- else if (direction == DMA_FROM_DEVICE)
- return IOMMU_PROT_IW;
- else if (direction == DMA_BIDIRECTIONAL)
- return IOMMU_PROT_IW | IOMMU_PROT_IR;
- else
- return 0;
-}
-
-/*
- * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is used by all
- * mapping functions provided with this IOMMU driver.
- * Must be called with the domain lock held.
- */
-static dma_addr_t __map_single(struct device *dev,
- struct dma_ops_domain *dma_dom,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction direction,
- u64 dma_mask)
-{
- dma_addr_t offset = paddr & ~PAGE_MASK;
- dma_addr_t address, start, ret;
- unsigned long flags;
- unsigned int pages;
- int prot = 0;
- int i;
-
- pages = iommu_num_pages(paddr, size, PAGE_SIZE);
- paddr &= PAGE_MASK;
-
- address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (!address)
- goto out;
-
- prot = dir2prot(direction);
-
- start = address;
- for (i = 0; i < pages; ++i) {
- ret = iommu_map_page(&dma_dom->domain, start, paddr,
- PAGE_SIZE, prot, GFP_ATOMIC);
- if (ret)
- goto out_unmap;
-
- paddr += PAGE_SIZE;
- start += PAGE_SIZE;
- }
- address += offset;
-
- domain_flush_np_cache(&dma_dom->domain, address, size);
-
-out:
- return address;
-
-out_unmap:
-
- for (--i; i >= 0; --i) {
- start -= PAGE_SIZE;
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- }
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
-
- dma_ops_free_iova(dma_dom, address, pages);
-
- return DMA_MAPPING_ERROR;
-}
-
-/*
- * Does the reverse of the __map_single function. Must be called with
- * the domain lock held too
- */
-static void __unmap_single(struct dma_ops_domain *dma_dom,
- dma_addr_t dma_addr,
- size_t size,
- int dir)
-{
- dma_addr_t i, start;
- unsigned int pages;
-
- pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- dma_addr &= PAGE_MASK;
- start = dma_addr;
-
- for (i = 0; i < pages; ++i) {
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- start += PAGE_SIZE;
- }
-
- if (amd_iommu_unmap_flush) {
- unsigned long flags;
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
- dma_ops_free_iova(dma_dom, dma_addr, pages);
- } else {
- pages = __roundup_pow_of_two(pages);
- queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
- }
-}
-
-/*
- * The exported map_single function for dma_ops.
- */
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = page_to_phys(page) + offset;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- u64 dma_mask;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return (dma_addr_t)paddr;
- else if (IS_ERR(domain))
- return DMA_MAPPING_ERROR;
-
- dma_mask = *dev->dma_mask;
- dma_dom = to_dma_ops_domain(domain);
-
- return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
-}
-
-/*
- * The exported unmap_single function for dma_ops.
- */
-static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, dir);
-}
-
-static int sg_num_pages(struct device *dev,
- struct scatterlist *sglist,
- int nelems)
-{
- unsigned long mask, boundary_size;
- struct scatterlist *s;
- int i, npages = 0;
-
- mask = dma_get_seg_boundary(dev);
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for_each_sg(sglist, s, nelems, i) {
- int p, n;
-
- s->dma_address = npages << PAGE_SHIFT;
- p = npages % boundary_size;
- n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- if (p + n > boundary_size)
- npages += boundary_size - p;
- npages += n;
- }
-
- return npages;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int mapped_pages = 0, npages = 0, prot = 0, i;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct scatterlist *s;
- unsigned long address;
- u64 dma_mask;
- int ret;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return 0;
-
- dma_dom = to_dma_ops_domain(domain);
- dma_mask = *dev->dma_mask;
-
- npages = sg_num_pages(dev, sglist, nelems);
-
- address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (!address)
- goto out_err;
-
- prot = dir2prot(direction);
-
- /* Map all sg entries */
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr, phys_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
- ret = iommu_map_page(domain, bus_addr, phys_addr,
- PAGE_SIZE, prot,
- GFP_ATOMIC | __GFP_NOWARN);
- if (ret)
- goto out_unmap;
-
- mapped_pages += 1;
- }
- }
-
- /* Everything is mapped - write the right values into s->dma_address */
- for_each_sg(sglist, s, nelems, i) {
- /*
- * Add in the remaining piece of the scatter-gather offset that
- * was masked out when we were determining the physical address
- * via (sg_phys(s) & PAGE_MASK) earlier.
- */
- s->dma_address += address + (s->offset & ~PAGE_MASK);
- s->dma_length = s->length;
- }
-
- if (s)
- domain_flush_np_cache(domain, s->dma_address, s->dma_length);
-
- return nelems;
-
-out_unmap:
- dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n",
- npages, ret);
-
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
-
- if (--mapped_pages == 0)
- goto out_free_iova;
- }
- }
-
-out_free_iova:
- free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
-
-out_err:
- return 0;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static void unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- unsigned long startaddr;
- int npages;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- startaddr = sg_dma_address(sglist) & PAGE_MASK;
- dma_dom = to_dma_ops_domain(domain);
- npages = sg_num_pages(dev, sglist, nelems);
-
- __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
-}
-
-/*
- * The exported alloc_coherent function for dma_ops.
- */
-static void *alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
-{
- u64 dma_mask = dev->coherent_dma_mask;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL) {
- page = alloc_pages(flag, get_order(size));
- *dma_addr = page_to_phys(page);
- return page_address(page);
- } else if (IS_ERR(domain))
- return NULL;
-
- dma_dom = to_dma_ops_domain(domain);
- size = PAGE_ALIGN(size);
- dma_mask = dev->coherent_dma_mask;
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
-
- page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
- if (!page) {
- if (!gfpflags_allow_blocking(flag))
- return NULL;
-
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag & __GFP_NOWARN);
- if (!page)
- return NULL;
- }
-
- if (!dma_mask)
- dma_mask = *dev->dma_mask;
-
- *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
- size, DMA_BIDIRECTIONAL, dma_mask);
-
- if (*dma_addr == DMA_MAPPING_ERROR)
- goto out_free;
-
- return page_address(page);
-
-out_free:
-
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-
- return NULL;
-}
-
-/*
- * The exported free_coherent function for dma_ops.
- */
-static void free_coherent(struct device *dev, size_t size,
- void *virt_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- page = virt_to_page(virt_addr);
- size = PAGE_ALIGN(size);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- goto free_mem;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
-
-free_mem:
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-}
-
-/*
- * This function is called by the DMA layer to find out if we can handle a
- * particular device. It is part of the dma_ops.
- */
-static int amd_iommu_dma_supported(struct device *dev, u64 mask)
-{
- if (!dma_direct_supported(dev, mask))
- return 0;
- return check_device(dev);
-}
-
-static const struct dma_map_ops amd_iommu_dma_ops = {
- .alloc = alloc_coherent,
- .free = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
-};
-
-static int init_reserved_iova_ranges(void)
-{
- struct pci_dev *pdev = NULL;
- struct iova *val;
-
- init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
-
- lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
- &reserved_rbtree_key);
-
- /* MSI memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
- if (!val) {
- pr_err("Reserving MSI range failed\n");
- return -ENOMEM;
- }
-
- /* HT memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
- if (!val) {
- pr_err("Reserving HT range failed\n");
- return -ENOMEM;
- }
-
- /*
- * Memory used for PCI resources
- * FIXME: Check whether we can reserve the PCI-hole completly
- */
- for_each_pci_dev(pdev) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
- struct resource *r = &pdev->resource[i];
-
- if (!(r->flags & IORESOURCE_MEM))
- continue;
-
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!val) {
- pci_err(pdev, "Reserve pci-resource range %pR failed\n", r);
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
int __init amd_iommu_init_api(void)
{
int ret, err = 0;
@@ -2842,10 +2279,6 @@ int __init amd_iommu_init_api(void)
if (ret)
return ret;
- ret = init_reserved_iova_ranges();
- if (ret)
- return ret;
-
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
@@ -2916,7 +2349,6 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
- mutex_init(&domain->api_lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
@@ -2947,7 +2379,6 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
- struct dma_ops_domain *dma_domain;
switch (type) {
case IOMMU_DOMAIN_UNMANAGED:
@@ -2968,12 +2399,11 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
break;
case IOMMU_DOMAIN_DMA:
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain) {
+ pdomain = dma_ops_domain_alloc();
+ if (!pdomain) {
pr_err("Failed to allocate\n");
return NULL;
}
- pdomain = &dma_domain->domain;
break;
case IOMMU_DOMAIN_IDENTITY:
pdomain = protection_domain_alloc();
@@ -2992,7 +2422,6 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -3007,8 +2436,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
switch (dom->type) {
case IOMMU_DOMAIN_DMA:
/* Now release the domain */
- dma_dom = to_dma_ops_domain(domain);
- dma_ops_domain_free(dma_dom);
+ dma_ops_domain_free(domain);
break;
default:
if (domain->mode != PAGE_MODE_NONE)
@@ -3064,6 +2492,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return -EINVAL;
dev_data = dev->archdata.iommu;
+ dev_data->defer_attach = false;
iommu = amd_iommu_rlookup_table[dev_data->devid];
if (!iommu)
@@ -3089,7 +2518,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot,
+ gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
@@ -3103,9 +2533,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
- mutex_unlock(&domain->api_lock);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
domain_flush_np_cache(domain, iova, page_size);
@@ -3117,16 +2545,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
return 0;
- mutex_lock(&domain->api_lock);
- unmap_size = iommu_unmap_page(domain, iova, page_size);
- mutex_unlock(&domain->api_lock);
-
- return unmap_size;
+ return iommu_unmap_page(domain, iova, page_size);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3227,19 +2650,6 @@ static void amd_iommu_put_resv_regions(struct device *dev,
kfree(entry);
}
-static void amd_iommu_apply_resv_region(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region)
-{
- struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
- unsigned long start, end;
-
- start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length - 1);
-
- WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
-}
-
static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
@@ -3276,9 +2686,9 @@ const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
+ .domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = amd_iommu_put_resv_regions,
- .apply_resv_region = amd_iommu_apply_resv_region,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -3590,9 +3000,23 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *pdomain;
+ struct iommu_domain *io_domain;
+ struct device *dev = &pdev->dev;
+
+ if (!check_device(dev))
+ return NULL;
- pdomain = get_domain(&pdev->dev);
- if (IS_ERR(pdomain))
+ pdomain = get_dev_data(dev)->domain;
+ if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
+ get_dev_data(dev)->defer_attach = false;
+ io_domain = iommu_get_domain_for_dev(dev);
+ pdomain = to_pdomain(io_domain);
+ attach_device(dev, pdomain);
+ }
+ if (pdomain == NULL)
+ return NULL;
+
+ if (!dma_ops_domain(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
@@ -3732,7 +3156,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
iommu_flush_dte(iommu, devid);
}
-static struct irq_remap_table *alloc_irq_table(u16 devid)
+static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
+ void *data)
+{
+ struct irq_remap_table *table = data;
+
+ irq_lookup_table[alias] = table;
+ set_dte_irq_entry(alias, table);
+
+ iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+
+ return 0;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
@@ -3778,7 +3215,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
table = new_table;
new_table = NULL;
- set_remap_table_entry(iommu, devid, table);
+ if (pdev)
+ pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
+ table);
+ else
+ set_remap_table_entry(iommu, devid, table);
+
if (devid != alias)
set_remap_table_entry(iommu, alias, table);
@@ -3795,7 +3237,8 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align)
+static int alloc_irq_index(u16 devid, int count, bool align,
+ struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
@@ -3805,7 +3248,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, pdev);
if (!table)
return -ENODEV;
@@ -4238,7 +3681,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_remap_table *table;
struct amd_iommu *iommu;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -4255,11 +3698,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
} else {
index = -ENOMEM;
}
- } else {
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
- index = alloc_irq_index(devid, nr_irqs, align);
+ index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+ } else {
+ index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
+
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
ret = index;
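A recurring pattern in the amd_iommu.c rework above is replacing the single cached alias ID with a walk over all DMA aliases: clone_aliases(), device_flush_dte() and alloc_irq_table() all funnel their per-alias work through pci_for_each_dma_alias() callbacks. The callback shape, sketched (callback body illustrative):

    #include <linux/pci.h>

    static int example_alias_cb(struct pci_dev *pdev, u16 alias, void *data)
    {
            /* apply the same configuration to every alias requester ID */
            pr_debug("configuring alias %04x for %s\n", alias,
                     pci_name(pdev));
            return 0;       /* a non-zero return aborts the walk */
    }

    static void example_configure(struct pci_dev *pdev)
    {
            pci_for_each_dma_alias(pdev, example_alias_cb, NULL);
    }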
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 17bd5a349119..f52f59d5c6bd 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -468,7 +468,6 @@ struct protection_domain {
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
- struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
@@ -639,8 +638,8 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
- u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index 5c87a38620c4..b2fe72a8f019 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
-static int arm_mmu500_reset(struct arm_smmu_device *smmu)
+int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
int i;
@@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
"calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
+ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
+ return qcom_smmu_impl_init(smmu);
+
return smmu;
}
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
new file mode 100644
index 000000000000..24c071c1d8b0
--- /dev/null
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/qcom_scm.h>
+
+#include "arm-smmu.h"
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+};
+
+static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ arm_mmu500_reset(smmu);
+
+ /*
+ * To address performance degradation in non-real-time clients,
+ * such as USB and UFS, turn off wait-for-safe on sdm845-based boards,
+ * such as MTP and db845, whose firmware implements secure monitor
+ * call handlers to turn on/off the wait-for-safe logic.
+ */
+ ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
+ if (ret)
+ dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
+
+ return ret;
+}
+
+static const struct arm_smmu_impl qcom_smmu_impl = {
+ .reset = qcom_sdm845_smmu500_reset,
+};
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct qcom_smmu *qsmmu;
+
+ qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ if (!qsmmu)
+ return ERR_PTR(-ENOMEM);
+
+ qsmmu->smmu = *smmu;
+
+ qsmmu->smmu.impl = &qcom_smmu_impl;
+ devm_kfree(smmu->dev, smmu);
+
+ return &qsmmu->smmu;
+}
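qcom_smmu_impl_init() above uses a common subclassing idiom: allocate a wrapper that embeds the generic structure, copy the already-initialized contents into it, free the original, and return the embedded member. Implementation-private state can later be recovered with container_of(). A sketch with illustrative names:

    #include <linux/kernel.h>

    #include "arm-smmu.h"

    struct example_smmu {
            struct arm_smmu_device smmu;
            int extra_state;        /* implementation-private data */
    };

    static struct example_smmu *to_example_smmu(struct arm_smmu_device *smmu)
    {
            return container_of(smmu, struct example_smmu, smmu);
    }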
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8da93e730d6f..effe72eb89e7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
cfg->cd.asid = (u16)asid;
cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
return 0;
out_free_asid:
@@ -2448,7 +2448,7 @@ out_unlock:
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Interrupt lines */
- irq = platform_get_irq_byname(pdev, "combined");
+ irq = platform_get_irq_byname_optional(pdev, "combined");
if (irq > 0)
smmu->combined_irq = irq;
else {
- irq = platform_get_irq_byname(pdev, "eventq");
+ irq = platform_get_irq_byname_optional(pdev, "eventq");
if (irq > 0)
smmu->evtq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "priq");
+ irq = platform_get_irq_byname_optional(pdev, "priq");
if (irq > 0)
smmu->priq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "gerror");
+ irq = platform_get_irq_byname_optional(pdev, "gerror");
if (irq > 0)
smmu->gerr_irq = irq;
}
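
Switching to platform_get_irq_byname_optional() keeps the probe log quiet when an interrupt line is legitimately absent (for example when MSIs are used instead of wired IRQs); the return convention is otherwise unchanged. A minimal sketch of the idiom:

/* Sketch: the _optional variant returns the IRQ number or a negative
 * errno, but does not log the "IRQ not found" error the plain variant
 * prints, so absence is not treated as a probe failure. */
int irq = platform_get_irq_byname_optional(pdev, "combined");
if (irq > 0)
	smmu->combined_irq = irq;
/* a negative value simply means "not wired" here */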
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7c503a6bc585..4f1a350d9529 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
@@ -122,7 +123,7 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
if (pm_runtime_enabled(smmu->dev))
- pm_runtime_put(smmu->dev);
+ pm_runtime_put_autosuspend(smmu->dev);
}
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -244,6 +245,9 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
unsigned int spin_cnt, delay;
u32 reg;
+ if (smmu->impl && unlikely(smmu->impl->tlb_sync))
+ return smmu->impl->tlb_sync(smmu, page, sync, status);
+
arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
@@ -268,9 +272,8 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
-static void arm_smmu_tlb_sync_context(void *cookie)
+static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
unsigned long flags;
@@ -280,13 +283,6 @@ static void arm_smmu_tlb_sync_context(void *cookie)
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
-static void arm_smmu_tlb_sync_vmid(void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
-
- arm_smmu_tlb_sync_global(smmu_domain->smmu);
-}
-
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
@@ -297,7 +293,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
wmb();
arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
- arm_smmu_tlb_sync_context(cookie);
+ arm_smmu_tlb_sync_context(smmu_domain);
}
static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -312,18 +308,16 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
}
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- int reg, idx = cfg->cbndx;
+ int idx = cfg->cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova = (iova >> 12) << 12;
iova |= cfg->asid;
@@ -342,16 +336,15 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
}
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- int reg, idx = smmu_domain->cfg.cbndx;
+ int idx = smmu_domain->cfg.cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
do {
if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
@@ -362,85 +355,98 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
} while (size -= granule);
}
-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- wmb();
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVA);
+ arm_smmu_tlb_sync_context(cookie);
+}
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+}
- ops->tlb_inv_range(iova, size, granule, false, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
+ arm_smmu_tlb_sync_context(cookie);
+}
- ops->tlb_inv_range(iova, size, granule, true, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
}
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_context_s2(cookie);
+}
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
- ops->tlb_inv_range(iova, granule, granule, true, cookie);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
-static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s1,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s1,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s2,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -472,6 +478,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
struct arm_smmu_device *smmu = dev;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
@@ -481,11 +489,19 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
if (!gfsr)
return IRQ_NONE;
- dev_err_ratelimited(smmu->dev,
- "Unexpected global fault, this could be serious\n");
- dev_err_ratelimited(smmu->dev,
- "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
- gfsr, gfsynr0, gfsynr1, gfsynr2);
+ if (__ratelimit(&rs)) {
+ if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
+ (gfsr & sGFSR_USF))
+ dev_err(smmu->dev,
+ "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
+ (u16)gfsynr1);
+ else
+ dev_err(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+ }
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
return IRQ_HANDLED;
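
The handler now rate-limits the whole diagnostic block with one shared state, so the two messages are always emitted (or suppressed) together rather than independently. The generic pattern, for reference, with the default interval/burst values:

#include <linux/ratelimit.h>

static void report_fault_example(struct device *dev, u32 status)
{
	/* One ratelimit state shared by all messages in the group, so a
	 * suppressed event drops the whole group rather than half of it. */
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs)) {
		dev_err(dev, "unexpected fault\n");
		dev_err(dev, "\tstatus 0x%08x\n", status);
	}
}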
@@ -536,8 +552,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
} else {
- cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
- cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+ cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
}
}
}
@@ -770,7 +786,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
- .tlb = &smmu_domain->flush_ops->tlb,
+ .tlb = smmu_domain->flush_ops,
.iommu_dev = smmu->dev,
};
@@ -1039,8 +1055,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (!group)
- group = ERR_PTR(-ENOMEM);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_err;
@@ -1154,13 +1168,27 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+ /*
+	 * Set up an autosuspend delay to avoid bouncing the runpm state.
+	 * Otherwise, if a driver for a suspended consumer device
+	 * unmaps buffers, it will trigger a runpm resume/suspend cycle
+	 * for each one.
+	 *
+	 * For example, when an application or game using a GPU device
+	 * exits, it can trigger unmapping hundreds or thousands of
+	 * buffers. With a runpm cycle for each buffer, that adds up
+	 * to 5-10 seconds' worth of reprogramming the context bank,
+	 * while the system appears to be locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
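
With autosuspend enabled, arm_smmu_rpm_put() (converted to pm_runtime_put_autosuspend() earlier in this file) only marks the device idle; the actual suspend is deferred by the 20 ms window set above, so a burst of unmaps reuses a single resume. The canonical pairing looks roughly like this (a sketch, not driver code):

/* Sketch of the runtime-PM autosuspend pairing assumed above. */
static int do_hw_access(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume if suspended */
	if (ret < 0)
		return ret;

	/* ... touch registers ... */

	pm_runtime_mark_last_busy(dev);		/* restart the idle window */
	pm_runtime_put_autosuspend(dev);	/* suspend only after the delay */
	return 0;
}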
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1200,7 +1228,7 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
+ smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
@@ -1211,11 +1239,16 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->flush_ops) {
- arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb_sync(smmu_domain);
- arm_smmu_rpm_put(smmu);
- }
+ if (!smmu)
+ return;
+
+ arm_smmu_rpm_get(smmu);
+ if (smmu->version == ARM_SMMU_V2 ||
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_tlb_sync_context(smmu_domain);
+ else
+ arm_smmu_tlb_sync_global(smmu);
+ arm_smmu_rpm_put(smmu);
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2062,10 +2095,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
for (i = 0; i < num_irqs; ++i) {
int irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq index %d\n", i);
+ if (irq < 0)
return -ENODEV;
- }
smmu->irqs[i] = irq;
}
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index b19b6cae9b5e..62b9f0cec49b 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -79,6 +79,8 @@
#define ID7_MINOR GENMASK(3, 0)
#define ARM_SMMU_GR0_sGFSR 0x48
+#define sGFSR_USF BIT(1)
+
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
@@ -304,17 +306,10 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_BYPASS,
};
-struct arm_smmu_flush_ops {
- struct iommu_flush_ops tlb;
- void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
- void (*tlb_sync)(void *cookie);
-};
-
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- const struct arm_smmu_flush_ops *flush_ops;
+ const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
bool non_strict;
@@ -335,6 +330,8 @@ struct arm_smmu_impl {
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain);
+ void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
+ int status);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -398,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+
+int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 646332fbf3d7..0cc702a70a96 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
+static int iommu_dma_deferred_attach(struct device *dev,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = domain->ops;
+
+ if (!is_kdump_kernel())
+ return 0;
+
+ if (unlikely(ops->is_attach_deferred &&
+ ops->is_attach_deferred(domain, dev)))
+ return iommu_attach_device(domain, dev);
+
+ return 0;
+}
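
This helper exists for kdump: devices left running by the crashed kernel keep translating through the old tables, so the crash kernel defers (re)attachment until a driver first uses the DMA API. Every mapping path converted below calls it first; a condensed sketch of the shape (the function name is illustrative):

/* Sketch: the shape of each mapping path converted below. */
static dma_addr_t example_map(struct device *dev, phys_addr_t phys, size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	/* In a kdump kernel this may perform the deferred attach. */
	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	/* ... allocate an IOVA and map as usual ... */
	return DMA_MAPPING_ERROR;	/* placeholder */
}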
+
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -461,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, dma_addr_t dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -469,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return DMA_MAPPING_ERROR;
+
size = iova_align(iovad, size + iova_off);
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR;
}
@@ -578,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return NULL;
+
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
@@ -610,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
@@ -710,7 +732,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
- dma_handle =__iommu_dma_map(dev, phys, size, prot);
+ dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(phys, size, dir);
@@ -820,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return 0;
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -870,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+ if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
@@ -910,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1016,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr)
return NULL;
- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+ dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
return NULL;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index eecd6a421667..3acfa6a25fa2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void)
}
#ifdef CONFIG_X86
- if (!ret)
+ if (!ret) {
x86_init.iommu.iommu_init = intel_iommu_init;
+ x86_platform.iommu_shutdown = intel_iommu_shutdown;
+ }
+
#endif
if (dmar_tbl) {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 9c94e16fb127..186ff5cc975c 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
- int prot)
+ int prot, gfp_t gfp)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6db6d969e31c..0c8d81f56a30 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-/*
- * find_domain
- * Note: we use struct device->archdata.iommu stores the info
- */
static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
+ dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
+ return NULL;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return NULL;
+}
+
+static struct dmar_domain *deferred_attach_domain(struct device *dev)
+{
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
struct iommu_domain *domain;
@@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev)
intel_iommu_attach_device(domain, dev);
}
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
-
- if (likely(info))
- return info->domain;
- return NULL;
+ return find_domain(dev);
}
static inline struct device_domain_info *
@@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return 0;
@@ -3827,7 +3832,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0;
int ret;
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR;
@@ -4314,13 +4319,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
+ int ret;
+
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ ret = arch_rmrr_sanity_check(rmrr);
+ if (ret)
+ return ret;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
goto out;
rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
+
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
@@ -4759,6 +4770,26 @@ static void intel_disable_iommus(void)
iommu_disable_translation(iommu);
}
+void intel_iommu_shutdown(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ if (no_iommu || dmar_disabled)
+ return;
+
+ down_write(&dmar_global_lock);
+
+ /* Disable PMRs explicitly here. */
+ for_each_iommu(iommu, drhd)
+ iommu_disable_protect_mem_regions(iommu);
+
+ /* Make sure the IOMMUs are switched off */
+ intel_disable_iommus();
+
+ up_write(&dmar_global_lock);
+}
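
intel_iommu_shutdown() is wired up through x86_platform.iommu_shutdown in the dmar.c hunk above; on x86 that callback is expected to run from the machine shutdown path, so DMA remapping and the protected memory regions are quiesced before a kexec or power-off. Roughly (a sketch of the assumed arch-side consumer, not part of this patch):

/* Sketch: how the arch shutdown path is assumed to invoke the hook. */
void example_machine_shutdown(void)
{
	/* ... halt other CPUs, quiesce devices ... */
	if (x86_platform.iommu_shutdown)
		x86_platform.iommu_shutdown();	/* -> intel_iommu_shutdown() */
}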
+
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -5440,7 +5471,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+ size_t size, int iommu_prot, gfp_t gfp)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 4cb394937700..7c3bd2c3cdca 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
-static const struct iommu_flush_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index ca51036aa53c..bdf47f745268 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -32,39 +32,31 @@
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
- * For consistency with the architecture, we always consider
- * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
- */
-#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
-
-/*
* Calculate the right shift amount to get to the portion describing level l
* in a virtual address mapped by the pagetable in d.
*/
#define ARM_LPAE_LVL_SHIFT(l,d) \
- ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
- * (d)->bits_per_level) + (d)->pg_shift)
+ (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \
+ ilog2(sizeof(arm_lpae_iopte)))
-#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
-
-#define ARM_LPAE_PAGES_PER_PGD(d) \
- DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
+#define ARM_LPAE_GRANULE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
+#define ARM_LPAE_PGD_SIZE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
/*
* Calculate the index at level l used to map virtual address a using the
* pagetable in d.
*/
#define ARM_LPAE_PGD_IDX(l,d) \
- ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+ ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
#define ARM_LPAE_LVL_IDX(a,l,d) \
(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
-#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
- ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d))
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT 0
@@ -180,10 +172,9 @@
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
- int levels;
- size_t pgd_size;
- unsigned long pg_shift;
- unsigned long bits_per_level;
+ int pgd_bits;
+ int start_level;
+ int bits_per_level;
void *pgd;
};
@@ -213,7 +204,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
{
u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
- if (data->pg_shift < 16)
+ if (ARM_LPAE_GRANULE(data) < SZ_64K)
return paddr;
/* Rotate the packed high-order bits back to the top */
@@ -392,7 +383,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size && (size & cfg->pgsize_bitmap))
+ if (size == block_size)
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
@@ -464,7 +455,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_QCOM_SYS_CACHE)
+ else if (prot & IOMMU_SYS_CACHE_ONLY)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
@@ -479,16 +470,19 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int ret, lvl = ARM_LPAE_START_LVL(data);
+ int ret, lvl = data->start_level;
arm_lpae_iopte prot;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
- paddr >= (1ULL << data->iop.cfg.oas)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return -EINVAL;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -508,8 +502,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *start, *end;
unsigned long table_size;
- if (lvl == ARM_LPAE_START_LVL(data))
- table_size = data->pgd_size;
+ if (lvl == data->start_level)
+ table_size = ARM_LPAE_PGD_SIZE(data);
else
table_size = ARM_LPAE_GRANULE(data);
@@ -537,7 +531,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, data->start_level, data->pgd);
kfree(data);
}
@@ -652,13 +646,16 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return 0;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias))
return 0;
- return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -666,7 +663,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
+ int lvl = data->start_level;
do {
/* Valid IOPTE pointer? */
@@ -743,8 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
- unsigned long va_bits, pgd_bits;
struct arm_lpae_io_pgtable *data;
+ int levels, va_bits, pg_shift;
arm_lpae_restrict_pgsizes(cfg);
@@ -766,15 +763,15 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
if (!data)
return NULL;
- data->pg_shift = __ffs(cfg->pgsize_bitmap);
- data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
- va_bits = cfg->ias - data->pg_shift;
- data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ va_bits = cfg->ias - pg_shift;
+ levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ data->start_level = ARM_LPAE_MAX_LEVELS - levels;
/* Calculate the actual size of our pgd (without concatenation) */
- pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
- data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+ data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
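
The replacement fields are easy to sanity-check by hand. For a 4 KiB granule, a 48-bit IAS and 8-byte PTEs (a worked example, not driver output):

/*
 * Worked example for SZ_4K granule, ias = 48, ARM_LPAE_MAX_LEVELS = 4:
 *   pg_shift          = __ffs(pgsize_bitmap)      = 12
 *   bits_per_level    = 12 - ilog2(8)             = 9
 *   va_bits           = 48 - 12                   = 36
 *   levels            = DIV_ROUND_UP(36, 9)       = 4
 *   start_level       = 4 - 4                     = 0
 *   pgd_bits          = 36 - 9 * (4 - 1)          = 9
 *   ARM_LPAE_PGD_SIZE = 8 << 9                    = 4096 bytes
 */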
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_lpae_map,
@@ -864,11 +861,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_INC_OWBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
- cfg->arm_lpae_s1_cfg.mair[0] = reg;
- cfg->arm_lpae_s1_cfg.mair[1] = 0;
+ cfg->arm_lpae_s1_cfg.mair = reg;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -903,13 +900,13 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
* Concatenate PGDs at level 1 if possible in order to reduce
* the depth of the stage-2 walk.
*/
- if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ if (data->start_level == 0) {
unsigned long pgd_pages;
- pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_size = pgd_pages << data->pg_shift;
- data->levels--;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
}
@@ -919,7 +916,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
- sl = ARM_LPAE_START_LVL(data);
+ sl = data->start_level;
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -965,7 +962,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -1034,9 +1032,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* Mali seems to need a full 4-level table regardless of IAS */
- if (data->levels < ARM_LPAE_MAX_LEVELS) {
- data->levels = ARM_LPAE_MAX_LEVELS;
- data->pgd_size = sizeof(arm_lpae_iopte);
+ if (data->start_level > 0) {
+ data->start_level = 0;
+ data->pgd_bits = 0;
}
/*
* MEMATTR: Mali has no actual notion of a non-cacheable type, so the
@@ -1053,7 +1051,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
+ cfg);
if (!data->pgd)
goto out_free_data;
@@ -1097,22 +1096,23 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
@@ -1131,9 +1131,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
cfg->pgsize_bitmap, cfg->ias);
- pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
- data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
+ ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
+ ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i) ({ \
@@ -1145,7 +1145,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
- static const enum io_pgtable_fmt fmts[] = {
+ static const enum io_pgtable_fmt fmts[] __initconst = {
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
};
@@ -1244,13 +1244,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
static int __init arm_lpae_do_selftests(void)
{
- static const unsigned long pgsize[] = {
+ static const unsigned long pgsize[] __initconst = {
SZ_4K | SZ_2M | SZ_1G,
SZ_16K | SZ_32M,
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] = {
+ static const unsigned int ias[] __initconst = {
32, 36, 40, 42, 44, 48,
};
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
new file mode 100644
index 000000000000..0f8dd377aada
--- /dev/null
+++ b/drivers/iommu/ioasid.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O Address Space ID allocator. There is one global IOASID space, split into
+ * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
+ * free IOASIDs with ioasid_alloc() and ioasid_free().
+ */
+#include <linux/ioasid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+struct ioasid_data {
+ ioasid_t id;
+ struct ioasid_set *set;
+ void *private;
+ struct rcu_head rcu;
+};
+
+/*
+ * struct ioasid_allocator_data - Internal data structure to hold information
+ * about an allocator. There are two types of allocators:
+ *
+ * - The default allocator always has its own XArray to track the IOASIDs
+ *   allocated.
+ * - Custom allocators may share allocation helpers with different private data.
+ * Custom allocators that share the same helper functions also share the same
+ * XArray.
+ * Rules:
+ * 1. The default allocator is always available and is not dynamically
+ *    registered. This prevents race conditions with early boot code that
+ *    wants to register custom allocators or allocate IOASIDs.
+ * 2. Custom allocators take precedence over the default allocator.
+ * 3. When all custom allocators sharing the same helper functions are
+ * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
+ * freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
+ * 4. When switching between custom allocators sharing the same helper
+ * functions, outstanding IOASIDs are preserved.
+ * 5. When switching between custom allocator and default allocator, all IOASIDs
+ * must be freed to ensure unadulterated space for the new allocator.
+ *
+ * @ops: allocator helper functions and its data
+ * @list: registered custom allocators
+ * @slist: allocators sharing the same ops but different data
+ * @flags: attributes of the allocator
+ * @xa: xarray holds the IOASID space
+ * @rcu: used for kfree_rcu when unregistering allocator
+ */
+struct ioasid_allocator_data {
+ struct ioasid_allocator_ops *ops;
+ struct list_head list;
+ struct list_head slist;
+#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
+ unsigned long flags;
+ struct xarray xa;
+ struct rcu_head rcu;
+};
+
+static DEFINE_SPINLOCK(ioasid_allocator_lock);
+static LIST_HEAD(allocators_list);
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
+static void default_free(ioasid_t ioasid, void *opaque);
+
+static struct ioasid_allocator_ops default_ops = {
+ .alloc = default_alloc,
+ .free = default_free,
+};
+
+static struct ioasid_allocator_data default_allocator = {
+ .ops = &default_ops,
+ .flags = 0,
+ .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
+};
+
+static struct ioasid_allocator_data *active_allocator = &default_allocator;
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
+{
+ ioasid_t id;
+
+ if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
+ pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
+ return INVALID_IOASID;
+ }
+
+ return id;
+}
+
+static void default_free(ioasid_t ioasid, void *opaque)
+{
+ struct ioasid_data *ioasid_data;
+
+ ioasid_data = xa_erase(&default_allocator.xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+}
+
+/* Allocate and initialize a new custom allocator with its helper functions */
+static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+
+ ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
+ if (!ia_data)
+ return NULL;
+
+ xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&ia_data->slist);
+ ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
+ ia_data->ops = ops;
+
+ /* For tracking custom allocators that share the same ops */
+ list_add_tail(&ops->list, &ia_data->slist);
+
+ return ia_data;
+}
+
+static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
+{
+ return (a->free == b->free) && (a->alloc == b->alloc);
+}
+
+/**
+ * ioasid_register_allocator - register a custom allocator
+ * @ops: the custom allocator ops to be registered
+ *
+ * Custom allocators take precedence over the default xarray-based allocator.
+ * Private data associated with the IOASIDs allocated by the custom allocators
+ * is managed by the IOASID framework in the same way as the data the default
+ * allocator stores in its xarray.
+ *
+ * There can be multiple allocators registered but only one is active. In case
+ * of runtime removal of a custom allocator, the next one is activated based
+ * on the registration ordering.
+ *
+ * Multiple allocators can share the same alloc() function; in that case the
+ * IOASID space is shared.
+ */
+int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+ struct ioasid_allocator_data *pallocator;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+
+ ia_data = ioasid_alloc_allocator(ops);
+ if (!ia_data) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /*
+	 * No particular preference: we activate the first one and keep
+	 * the later-registered allocators in a list in case the first one
+	 * gets removed due to hotplug.
+ */
+ if (list_empty(&allocators_list)) {
+ WARN_ON(active_allocator != &default_allocator);
+		/* Use this new allocator if the default one has no outstanding IOASIDs */
+ if (xa_empty(&active_allocator->xa)) {
+ rcu_assign_pointer(active_allocator, ia_data);
+ list_add_tail(&ia_data->list, &allocators_list);
+ goto out_unlock;
+ }
+ pr_warn("Default allocator active with outstanding IOASID\n");
+ ret = -EAGAIN;
+ goto out_free;
+ }
+
+ /* Check if the allocator is already registered */
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (pallocator->ops == ops) {
+ pr_err("IOASID allocator already registered\n");
+ ret = -EEXIST;
+ goto out_free;
+ } else if (use_same_ops(pallocator->ops, ops)) {
+ /*
+ * If the new allocator shares the same ops,
+ * then they will share the same IOASID space.
+ * We should put them under the same xarray.
+ */
+ list_add_tail(&ops->list, &pallocator->slist);
+ goto out_free;
+ }
+ }
+ list_add_tail(&ia_data->list, &allocators_list);
+
+ spin_unlock(&ioasid_allocator_lock);
+ return 0;
+out_free:
+ kfree(ia_data);
+out_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_register_allocator);
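
A hedged sketch of how a caller (for example a paravirtual IOMMU driver) would plug in a custom allocator; the vcmd_* names and the virtual-command idea are illustrative only:

/* Sketch only: vcmd_alloc/vcmd_free are hypothetical callbacks. */
static ioasid_t vcmd_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
	/* e.g. issue a virtual command to the host and return the ID */
	return INVALID_IOASID;	/* placeholder */
}

static void vcmd_free(ioasid_t ioasid, void *opaque)
{
	/* e.g. tell the host to release the ID */
}

static struct ioasid_allocator_ops vcmd_ops = {
	.alloc = vcmd_alloc,
	.free  = vcmd_free,
	.pdata = NULL,		/* per-allocator private data */
};

/* At probe time: ret = ioasid_register_allocator(&vcmd_ops); */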
+
+/**
+ * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
+ * @ops: the custom allocator to be removed
+ *
+ * Remove an allocator from the list and activate the next allocator in
+ * the order it was registered, or revert to the default allocator if all
+ * custom allocators have been unregistered without outstanding IOASIDs.
+ */
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *pallocator;
+ struct ioasid_allocator_ops *sops;
+
+ spin_lock(&ioasid_allocator_lock);
+ if (list_empty(&allocators_list)) {
+ pr_warn("No custom IOASID allocators active!\n");
+ goto exit_unlock;
+ }
+
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (!use_same_ops(pallocator->ops, ops))
+ continue;
+
+ if (list_is_singular(&pallocator->slist)) {
+ /* No shared helper functions */
+ list_del(&pallocator->list);
+ /*
+ * All IOASIDs should have been freed before
+ * the last allocator that shares the same ops
+ * is unregistered.
+ */
+ WARN_ON(!xa_empty(&pallocator->xa));
+ if (list_empty(&allocators_list)) {
+ pr_info("No custom IOASID allocators, switch to default.\n");
+ rcu_assign_pointer(active_allocator, &default_allocator);
+ } else if (pallocator == active_allocator) {
+ rcu_assign_pointer(active_allocator,
+ list_first_entry(&allocators_list,
+ struct ioasid_allocator_data, list));
+ pr_info("IOASID allocator changed");
+ }
+ kfree_rcu(pallocator, rcu);
+ break;
+ }
+ /*
+ * Find the matching shared ops to delete,
+ * but keep outstanding IOASIDs
+ */
+ list_for_each_entry(sops, &pallocator->slist, list) {
+ if (sops == ops) {
+ list_del(&ops->list);
+ break;
+ }
+ }
+ break;
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
+
+/**
+ * ioasid_set_data - Set private data for an allocated ioasid
+ * @ioasid: the ID to set data
+ * @data: the private data
+ *
+ * For an IOASID that is already allocated, private data can be set
+ * via this API. Future lookups can be done via ioasid_find().
+ */
+int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ struct ioasid_data *ioasid_data;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (ioasid_data)
+ rcu_assign_pointer(ioasid_data->private, data);
+ else
+ ret = -ENOENT;
+ spin_unlock(&ioasid_allocator_lock);
+
+ /*
+ * Wait for readers to stop accessing the old private data, so the
+ * caller can free it.
+ */
+ if (!ret)
+ synchronize_rcu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_set_data);
+
+/**
+ * ioasid_alloc - Allocate an IOASID
+ * @set: the IOASID set
+ * @min: the minimum ID (inclusive)
+ * @max: the maximum ID (inclusive)
+ * @private: data private to the caller
+ *
+ * Allocate an ID between @min and @max. The @private pointer is stored
+ * internally and can be retrieved with ioasid_find().
+ *
+ * Return: the allocated ID on success, or %INVALID_IOASID on failure.
+ */
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private)
+{
+ struct ioasid_data *data;
+ void *adata;
+ ioasid_t id;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return INVALID_IOASID;
+
+ data->set = set;
+ data->private = private;
+
+ /*
+	 * A custom allocator needs its allocator data to perform
+	 * platform-specific operations.
+ */
+ spin_lock(&ioasid_allocator_lock);
+ adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
+ id = active_allocator->ops->alloc(min, max, adata);
+ if (id == INVALID_IOASID) {
+ pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
+ goto exit_free;
+ }
+
+ if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
+ xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
+		/* A custom allocator needs the framework to store and track allocation results */
+ pr_err("Failed to alloc ioasid from %d\n", id);
+ active_allocator->ops->free(id, active_allocator->ops->pdata);
+ goto exit_free;
+ }
+ data->id = id;
+
+ spin_unlock(&ioasid_allocator_lock);
+ return id;
+exit_free:
+ spin_unlock(&ioasid_allocator_lock);
+ kfree(data);
+ return INVALID_IOASID;
+}
+EXPORT_SYMBOL_GPL(ioasid_alloc);
+
+/**
+ * ioasid_free - Free an IOASID
+ * @ioasid: the ID to remove
+ */
+void ioasid_free(ioasid_t ioasid)
+{
+ struct ioasid_data *ioasid_data;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (!ioasid_data) {
+ pr_err("Trying to free unknown IOASID %u\n", ioasid);
+ goto exit_unlock;
+ }
+
+ active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
+	/* A custom allocator needs an additional step to free the xa element */
+ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
+ ioasid_data = xa_erase(&active_allocator->xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_free);
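
End-to-end usage of the allocation API, as described in the file header; the set name and bounds below are illustrative:

/* Sketch: allocate a PASID-style ID with private data attached. */
static struct ioasid_set my_set;	/* illustrative; see the
					 * DECLARE_IOASID_SET helper
					 * mentioned in the file header */

static void ioasid_usage_example(void *mm_ctx)
{
	ioasid_t pasid = ioasid_alloc(&my_set, 1, (1U << 20) - 1, mm_ctx);

	if (pasid == INVALID_IOASID)
		return;			/* allocation failed */

	/* ... program the ID into hardware ... */

	ioasid_free(pasid);
}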
+
+/**
+ * ioasid_find - Find IOASID data
+ * @set: the IOASID set
+ * @ioasid: the IOASID to find
+ * @getter: function to call on the found object
+ *
+ * The optional getter function allows the caller to take a reference to the
+ * found object under the RCU lock. The function can also check whether the
+ * object is still valid: if @getter returns false, the object is invalid and
+ * NULL is returned.
+ *
+ * If the IOASID exists, return the private pointer passed to ioasid_alloc.
+ * Private data can be NULL if not set. Return an error if the IOASID is not
+ * found, or if @set is not NULL and the IOASID does not belong to the set.
+ */
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ void *priv;
+ struct ioasid_data *ioasid_data;
+ struct ioasid_allocator_data *idata;
+
+ rcu_read_lock();
+ idata = rcu_dereference(active_allocator);
+ ioasid_data = xa_load(&idata->xa, ioasid);
+ if (!ioasid_data) {
+ priv = ERR_PTR(-ENOENT);
+ goto unlock;
+ }
+ if (set && ioasid_data->set != set) {
+ /* data found but does not belong to the set */
+ priv = ERR_PTR(-EACCES);
+ goto unlock;
+ }
+	/* Now that the IOASID and its set are verified, we can return the private data */
+ priv = rcu_dereference(ioasid_data->private);
+ if (getter && !getter(priv))
+ priv = NULL;
+unlock:
+ rcu_read_unlock();
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(ioasid_find);
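
The getter is meant to take a reference under RCU before the result escapes the read-side critical section. A sketch with a hypothetical refcounted payload:

/* Sketch: "struct my_ctx" and its kref are hypothetical. */
struct my_ctx {
	struct kref ref;
};

static bool my_ctx_get(void *priv)
{
	/* Runs under rcu_read_lock(); refuse objects already being freed. */
	return kref_get_unless_zero(&((struct my_ctx *)priv)->ref);
}

/* ctx = ioasid_find(&my_set, pasid, my_ctx_get); */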
+
+MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d658c7c6a2ab..db7bfd4f2d20 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1665,6 +1665,36 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
+int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ if (unlikely(!domain->ops->cache_invalidate))
+ return -ENODEV;
+
+ return domain->ops->cache_invalidate(domain, dev, inv_info);
+}
+EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
+
+int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ if (unlikely(!domain->ops->sva_bind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_bind_gpasid(domain, dev, data);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
+
+int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ if (unlikely(!domain->ops->sva_unbind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_unbind_gpasid(dev, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
+
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1854,8 +1884,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -1892,8 +1922,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
+ ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
- ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1913,8 +1943,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ might_sleep();
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map);
+int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_atomic);
+
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -1991,8 +2035,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
@@ -2003,7 +2048,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = iommu_map(domain, iova + mapped, start, len, prot);
+ ret = __iommu_map(domain, iova + mapped, start,
+ len, prot, gfp);
+
if (ret)
goto out_err;
@@ -2031,8 +2078,22 @@ out_err:
return 0;
}
+
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ might_sleep();
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map_sg);
+size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
+
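
The split gives callers an explicit choice: iommu_map() may sleep (GFP_KERNEL, guarded by might_sleep()), while the _atomic variants are safe from non-sleeping contexts such as the DMA paths converted in dma-iommu.c above. For example (prot values illustrative):

/* Sketch: choose the variant by calling context. */
int map_from_atomic_context(struct iommu_domain *dom, unsigned long iova,
			    phys_addr_t pa, size_t sz)
{
	/* Page-table allocations must use GFP_ATOMIC here: */
	return iommu_map_atomic(dom, iova, pa, sz, IOMMU_READ | IOMMU_WRITE);
}

int map_from_process_context(struct iommu_domain *dom, unsigned long iova,
			     phys_addr_t pa, size_t sz)
{
	return iommu_map(dom, iova, pa, sz, IOMMU_READ | IOMMU_WRITE);
}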
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2639fc718117..d02edd2751f3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -50,6 +50,9 @@ struct ipmmu_features {
bool twobit_imttbcr_sl0;
bool reserved_context;
bool cache_snoop;
+ unsigned int ctx_offset_base;
+ unsigned int ctx_offset_stride;
+ unsigned int utlb_offset_base;
};
struct ipmmu_vmsa_device {
@@ -99,125 +102,49 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IM_NS_ALIAS_OFFSET 0x800
-#define IM_CTX_SIZE 0x40
-
-#define IMCTR 0x0000
-#define IMCTR_TRE (1 << 17)
-#define IMCTR_AFE (1 << 16)
-#define IMCTR_RTSEL_MASK (3 << 4)
-#define IMCTR_RTSEL_SHIFT 4
-#define IMCTR_TREN (1 << 3)
-#define IMCTR_INTEN (1 << 2)
-#define IMCTR_FLUSH (1 << 1)
-#define IMCTR_MMUEN (1 << 0)
-
-#define IMCAAR 0x0004
-
-#define IMTTBCR 0x0008
-#define IMTTBCR_EAE (1 << 31)
-#define IMTTBCR_PMB (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_TSZ1_MASK (7 << 16)
-#define IMTTBCR_TSZ1_SHIFT 16
-#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
+/* MMU "context" registers */
+#define IMCTR 0x0000 /* R-Car Gen2/3 */
+#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */
+#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
+
+#define IMTTBCR 0x0008 /* R-Car Gen2/3 */
+#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_LVL_2 (0 << 4)
-#define IMTTBCR_SL0_LVL_1 (1 << 4)
-#define IMTTBCR_TSZ0_MASK (7 << 0)
-#define IMTTBCR_TSZ0_SHIFT 0
-
-#define IMBUSCR 0x000c
-#define IMBUSCR_DVM (1 << 2)
-#define IMBUSCR_BUSSEL_SYS (0 << 0)
-#define IMBUSCR_BUSSEL_CCI (1 << 0)
-#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
-#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
-#define IMBUSCR_BUSSEL_MASK (3 << 0)
-
-#define IMTTLBR0 0x0010
-#define IMTTUBR0 0x0014
-#define IMTTLBR1 0x0018
-#define IMTTUBR1 0x001c
-
-#define IMSTR 0x0020
-#define IMSTR_ERRLVL_MASK (3 << 12)
-#define IMSTR_ERRLVL_SHIFT 12
-#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
-#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
-#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
-#define IMSTR_ERRCODE_MASK (7 << 8)
-#define IMSTR_MHIT (1 << 4)
-#define IMSTR_ABORT (1 << 2)
-#define IMSTR_PF (1 << 1)
-#define IMSTR_TF (1 << 0)
-
-#define IMMAIR0 0x0028
-#define IMMAIR1 0x002c
-#define IMMAIR_ATTR_MASK 0xff
-#define IMMAIR_ATTR_DEVICE 0x04
-#define IMMAIR_ATTR_NC 0x44
-#define IMMAIR_ATTR_WBRWA 0xff
-#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
-#define IMMAIR_ATTR_IDX_NC 0
-#define IMMAIR_ATTR_IDX_WBRWA 1
-#define IMMAIR_ATTR_IDX_DEV 2
-
-#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
-#define IMEUAR 0x0034 /* R-Car Gen3 only */
-
-#define IMPCTR 0x0200
-#define IMPSTR 0x0208
-#define IMPEAR 0x020c
-#define IMPMBA(n) (0x0280 + ((n) * 4))
-#define IMPMBD(n) (0x02c0 + ((n) * 4))
+#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */
+
+#define IMBUSCR 0x000c /* R-Car Gen2 only */
+#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */
+#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */
+
+#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */
+#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */
+
+#define IMSTR 0x0020 /* R-Car Gen2/3 */
+#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */
+#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */
+#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */
+#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */
+#define IMMAIR0 0x0028 /* R-Car Gen2/3 */
+
+#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
+
+/* uTLB registers */
#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
-#define IMUCTR0(n) (0x0300 + ((n) * 16))
-#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16))
-#define IMUCTR_FIXADDEN (1 << 31)
-#define IMUCTR_FIXADD_MASK (0xff << 16)
-#define IMUCTR_FIXADD_SHIFT 16
-#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
-#define IMUCTR_TTSEL_PMB (8 << 4)
-#define IMUCTR_TTSEL_MASK (15 << 4)
-#define IMUCTR_FLUSH (1 << 1)
-#define IMUCTR_MMUEN (1 << 0)
+#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */
+#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */
+#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
-#define IMUASID0(n) (0x0308 + ((n) * 16))
-#define IMUASID32(n) (0x0608 + (((n) - 32) * 16))
-#define IMUASID_ASID8_MASK (0xff << 8)
-#define IMUASID_ASID8_SHIFT 8
-#define IMUASID_ASID0_MASK (0xff << 0)
-#define IMUASID_ASID0_SHIFT 0
+#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */
/* -----------------------------------------------------------------------------
* Root device handling
@@ -264,29 +191,61 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
iowrite32(data, mmu->base + offset);
}
+static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return mmu->features->ctx_offset_base +
+ context_id * mmu->features->ctx_offset_stride + reg;
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
+}
+
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg)
{
- return ipmmu_read(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg);
+ return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}
static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
if (domain->mmu != domain->mmu->root)
- ipmmu_write(domain->mmu,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
+}
+
+static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
+{
+ return mmu->features->utlb_offset_base + reg;
+}
+
+static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
+}
+
+static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}
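With IM_CTX_SIZE gone, a context register's address is computed from the per-SoC feature fields as base + context_id * stride + reg. A standalone check of the arithmetic, using the 0/0x40 values that both feature structs later in this patch carry (the helper mirrors the new ipmmu_ctx_reg()):

    #include <stdio.h>

    struct features {
            unsigned int ctx_offset_base;
            unsigned int ctx_offset_stride;
    };

    static unsigned int ctx_reg(const struct features *f,
                                unsigned int context_id, unsigned int reg)
    {
            return f->ctx_offset_base + context_id * f->ctx_offset_stride + reg;
    }

    int main(void)
    {
            struct features gen3 = { .ctx_offset_base = 0, .ctx_offset_stride = 0x40 };

            /* IMTTBCR (0x0008) of context 2: 0 + 2 * 0x40 + 0x8 = 0x88 */
            printf("0x%x\n", ctx_reg(&gen3, 2, 0x0008));
            return 0;
    }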
/* -----------------------------------------------------------------------------
@@ -334,11 +293,10 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
*/
/* TODO: What should we set the ASID to? */
- ipmmu_write(mmu, IMUASID(utlb), 0);
+ ipmmu_imuasid_write(mmu, utlb, 0);
/* TODO: Do we need to flush the microTLB? */
- ipmmu_write(mmu, IMUCTR(utlb),
- IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
- IMUCTR_MMUEN);
+ ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
+ IMUCTR_FLUSH | IMUCTR_MMUEN);
mmu->utlb_ctx[utlb] = domain->context_id;
}
@@ -350,7 +308,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
{
struct ipmmu_vmsa_device *mmu = domain->mmu;
- ipmmu_write(mmu, IMUCTR(utlb), 0);
+ ipmmu_imuctr_write(mmu, utlb, 0);
mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}
@@ -438,7 +396,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
/* MAIR0 */
ipmmu_ctx_write_root(domain, IMMAIR0,
- domain->cfg.arm_lpae_s1_cfg.mair[0]);
+ domain->cfg.arm_lpae_s1_cfg.mair);
/* IMBUSCR */
if (domain->mmu->features->setup_imbuscr)
@@ -724,7 +682,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -783,6 +741,7 @@ static int ipmmu_init_platform_device(struct device *dev,
static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", },
{ .soc_id = "r8a7796", },
@@ -794,6 +753,7 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
};
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
{ .soc_id = "r8a77965", },
@@ -985,7 +945,7 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
/* Disable all contexts. */
for (i = 0; i < mmu->num_ctx; ++i)
- ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+ ipmmu_ctx_write(mmu, i, IMCTR, 0);
}
static const struct ipmmu_features ipmmu_features_default = {
@@ -997,6 +957,9 @@ static const struct ipmmu_features ipmmu_features_default = {
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
.cache_snoop = true,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -1008,6 +971,9 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
.cache_snoop = false,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct of_device_id ipmmu_of_ids[] = {
@@ -1018,6 +984,9 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a774a1",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a774b1",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index be99d408cf35..93f14bca26ee 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -504,7 +504,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t len, int prot)
+ phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 67a483c1a935..6fc1f5ecf91e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -101,8 +101,6 @@
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
struct mtk_iommu_domain {
- spinlock_t pgtlock; /* lock for page table */
-
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -173,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
}
}
-static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- void *cookie)
+static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
struct mtk_iommu_data *data = cookie;
+ unsigned long flags;
+ int ret;
+ u32 tmp;
for_each_m4u(data) {
+ spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
@@ -188,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE);
- data->tlb_flush_active = true;
- }
-}
-
-static void mtk_iommu_tlb_sync(void *cookie)
-{
- struct mtk_iommu_data *data = cookie;
- int ret;
- u32 tmp;
-
- for_each_m4u(data) {
- /* Avoid timing out if there's nothing to wait for */
- if (!data->tlb_flush_active)
- return;
+ /* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
- tmp, tmp != 0, 10, 100000);
+ tmp, tmp != 0, 10, 1000);
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
@@ -212,35 +200,24 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- data->tlb_flush_active = false;
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
}
}
-static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
-static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
{
- mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+ struct mtk_iommu_data *data = cookie;
+ struct iommu_domain *domain = &data->m4u_dom->domain;
+
+ iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
- .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
+ .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
+ .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
@@ -316,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- spin_lock_init(&dom->pgtlock);
-
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
@@ -412,22 +387,17 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
- int ret;
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
if (data->enable_4GB)
paddr |= BIT_ULL(32);
- spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
- return ret;
+ /* Synchronize with the tlb_lock */
+ return dom->iop->map(dom->iop, iova, paddr, size, prot);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -435,25 +405,26 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- unsigned long flags;
- size_t unmapsz;
-
- spin_lock_irqsave(&dom->pgtlock, flags);
- unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
- return unmapsz;
+ return dom->iop->unmap(dom->iop, iova, size, gather);
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ size_t length = gather->end - gather->start;
+
+ if (gather->start == ULONG_MAX)
+ return;
+
+ mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
+ data);
}
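The flow change: tlb_add_page no longer touches hardware, it only widens the range recorded in the iommu_iotlb_gather, and iotlb_sync issues one range flush for the whole batch. A simplified userspace model of that accumulate-then-flush scheme, using an exclusive end to match the length = end - start computation above; the hardware flush is a printf stand-in:

    #include <limits.h>
    #include <stdio.h>

    struct gather { unsigned long start, end; size_t pgsize; };

    static void gather_init(struct gather *g)
    {
            g->start = ULONG_MAX;          /* sentinel: nothing gathered yet */
            g->end = 0;
    }

    static void gather_add_page(struct gather *g, unsigned long iova, size_t size)
    {
            if (iova < g->start)
                    g->start = iova;
            if (iova + size > g->end)
                    g->end = iova + size;
            g->pgsize = size;
    }

    static void sync_flush(struct gather *g)
    {
            if (g->start == ULONG_MAX)
                    return;                /* same early-out as mtk_iommu_iotlb_sync() */
            printf("flush [%#lx, %#lx) in one range op\n", g->start, g->end);
    }

    int main(void)
    {
            struct gather g;

            gather_init(&g);
            gather_add_page(&g, 0x1000, 0x1000);
            gather_add_page(&g, 0x3000, 0x1000);
            sync_flush(&g);                /* flush [0x1000, 0x4000) */
            return 0;
    }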
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -461,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
phys_addr_t pa;
- spin_lock_irqsave(&dom->pgtlock, flags);
pa = dom->iop->iova_to_phys(dom->iop, iova);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
@@ -733,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ spin_lock_init(&data->tlb_lock);
list_add_tail(&data->list, &m4ulist);
if (!iommu_present(&platform_bus_type))
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index fc0f16eabacd..ea949a324e33 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -57,7 +57,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
bool enable_4GB;
- bool tlb_flush_active;
+ spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b5efd6dac953..e93b94ecac45 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 614a93aa5305..026ad2b29dcd 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -8,6 +8,8 @@
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 09c6e1c680db..be551cc34be4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c31e7bc4ccbe..52f38292df5b 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* MAIRs (stage-1 only) */
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[0]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair);
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[1]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
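arm_lpae_s1_cfg.mair is now a single 64-bit value rather than a two-entry array, so MAIR0/MAIR1 are simply its low and high words. In isolation, with a made-up attribute encoding:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mair = 0x00000004ff44ff04ULL;   /* hypothetical MAIR encoding */
            uint32_t mair0 = (uint32_t)mair;         /* ARM_SMMU_CB_S1_MAIR0 */
            uint32_t mair1 = (uint32_t)(mair >> 32); /* ARM_SMMU_CB_S1_MAIR1 */

            printf("MAIR0=%#x MAIR1=%#x\n", mair0, mair1);
            return 0;
    }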
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
unsigned long flags;
@@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (IS_ERR_OR_NULL(group))
- return PTR_ERR_OR_ZERO(group);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
iommu_group_put(group);
iommu_device_link(&qcom_iommu->iommu, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4dcbf68dfda4..b33cdd5aad81 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -527,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
int i, err;
err = pm_runtime_get_if_in_use(iommu->dev);
- if (WARN_ON_ONCE(err <= 0))
+ if (!err || WARN_ON_ONCE(err < 0))
return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
@@ -758,7 +758,7 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!dma_dev)
return NULL;
- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return NULL;
if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- return NULL;
+ goto err_free_domain;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -1021,6 +1021,8 @@ err_free_dt:
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+err_free_domain:
+ kfree(rk_domain);
return NULL;
}
@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+ kfree(rk_domain);
}
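The allocation change matters for lifetime: devm_kzalloc() tied the domain to dma_dev and was never reclaimed when a domain was destroyed, while plain kzalloc() obliges every exit path to free explicitly, hence the new err_free_domain label and the kfree() in domain_free. A compilable sketch of that ownership rule, with stand-in names:

    #include <stdlib.h>

    struct domain { int have_cookie; };

    static int get_dma_cookie(struct domain *d)   /* stand-in; may fail */
    {
            d->have_cookie = 1;
            return 0;
    }

    static struct domain *domain_alloc(void)
    {
            struct domain *d = calloc(1, sizeof(*d));

            if (!d)
                    return NULL;
            if (get_dma_cookie(d)) {
                    free(d);              /* err_free_domain: no leak on failure */
                    return NULL;
            }
            return d;
    }

    static void domain_free(struct domain *d)
    {
            free(d);                      /* pairs with the alloc, not devm_* */
    }

    int main(void)
    {
            struct domain *d = domain_alloc();

            if (d)
                    domain_free(d);
            return 0;
    }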
static int rk_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b0b18e23187..1137f3ddcb85 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -265,7 +265,7 @@ undo_cpu_trans:
}
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3924f7c05544..3fb7ba72507d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct gart_device *gart = gart_handle;
int ret;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7293fc3f796d..63a147b623e6 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
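Two bugs in one line: the old pde << 12 shifted in 32-bit arithmetic, truncating DMA addresses above 4 GiB, and it kept whatever attribute bits sat in the PDE. The fix masks with pfn_mask first and widens to dma_addr_t. A worked example with a hypothetical 22-bit PFN field:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pfn_mask = 0x3fffff;   /* hypothetical 22-bit PFN field */
            uint32_t pde = 0x00212345;      /* PFN whose address exceeds 32 bits */

            uint32_t before = pde << 12;                        /* 32-bit shift truncates */
            uint64_t after  = (uint64_t)(pde & pfn_mask) << 12; /* new smmu_pde_to_dma() */

            printf("before=%#" PRIx32 " after=%#" PRIx64 "\n", before, after);
            /* before=0x12345000 (bit 33 lost), after=0x212345000 (correct) */
            return 0;
    }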
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
static inline void smmu_flush(struct tegra_smmu *smmu)
{
- smmu_readl(smmu, SMMU_CONFIG);
+ smmu_readl(smmu, SMMU_PTB_ASID);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
@@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
unsigned int i;
u32 value;
+ group = tegra_smmu_find_swgroup(smmu, swgroup);
+ if (group) {
+ value = smmu_readl(smmu, group->reg);
+ value &= ~SMMU_ASID_MASK;
+ value |= SMMU_ASID_VALUE(asid);
+ value |= SMMU_ASID_ENABLE;
+ smmu_writel(smmu, value, group->reg);
+ } else {
+ pr_warn("%s group from swgroup %u not found\n", __func__,
+ swgroup);
+ /* No point moving ahead if group was not found */
+ return;
+ }
+
for (i = 0; i < smmu->soc->num_clients; i++) {
const struct tegra_mc_client *client = &smmu->soc->clients[i];
@@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
value |= BIT(client->smmu.bit);
smmu_writel(smmu, value, client->smmu.reg);
}
-
- group = tegra_smmu_find_swgroup(smmu, swgroup);
- if (group) {
- value = smmu_readl(smmu, group->reg);
- value &= ~SMMU_ASID_MASK;
- value |= SMMU_ASID_VALUE(asid);
- value |= SMMU_ASID_ENABLE;
- smmu_writel(smmu, value, group->reg);
- }
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
@@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
@@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3ea9d7682999..315c7cc4f99d 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
*/
static int __viommu_sync_req(struct viommu_dev *viommu)
{
- int ret = 0;
unsigned int len;
size_t write_len;
struct viommu_request *req;
@@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
kfree(req);
}
- return ret;
+ return 0;
}
static int viommu_sync_req(struct viommu_dev *viommu)
@@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
u32 flags;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index ccbb8973a324..697e6a8ccaae 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -370,6 +370,10 @@ config MVEBU_PIC
config MVEBU_SEI
bool
+config LS_EXTIRQ
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ select MFD_SYSCON
+
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
@@ -483,8 +487,6 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
-endmenu
-
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
@@ -496,3 +498,5 @@ config SIFIVE_PLIC
interrupt sources are subordinate to the PLIC.
If you don't know what to do here, say Y.
+
+endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index cc7c43932f16..e806dda690ea 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
+obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index fc75c61233aa..cbf01afcd2a6 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/syscore_ops.h>
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -39,6 +40,11 @@ struct bcm7038_l1_chip {
unsigned int n_words;
struct irq_domain *domain;
struct bcm7038_l1_cpu *cpus[NR_CPUS];
+#ifdef CONFIG_PM_SLEEP
+ struct list_head list;
+ u32 wake_mask[MAX_WORDS];
+#endif
+ u32 irq_fwd_mask[MAX_WORDS];
u8 affinity[MAX_WORDS * IRQS_PER_WORD];
};
@@ -249,6 +255,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
resource_size_t sz;
struct bcm7038_l1_cpu *cpu;
unsigned int i, n_words, parent_irq;
+ int ret;
if (of_address_to_resource(dn, idx, &res))
return -EINVAL;
@@ -262,6 +269,14 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words)
return -EINVAL;
+ ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+ intc->irq_fwd_mask, n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
GFP_KERNEL);
if (!cpu)
@@ -272,8 +287,11 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
return -ENOMEM;
for (i = 0; i < n_words; i++) {
- l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
- cpu->mask_cache[i] = 0xffffffff;
+ l1_writel(~intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_set(intc, i));
+ l1_writel(intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_clr(intc, i));
+ cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
}
parent_irq = irq_of_parse_and_map(dn, idx);
@@ -281,12 +299,89 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
+ * used because the struct chip_type suspend/resume hooks are not called
+ * unless chip_type is hooked onto a generic_chip. Since this driver does
+ * not use generic_chip, we need to manually hook our resume/suspend to
+ * syscore_ops.
+ */
+static LIST_HEAD(bcm7038_l1_intcs_list);
+static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
+
+static int bcm7038_l1_suspend(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+ u32 val;
+
+ /* Wakeup interrupt should only come from the boot cpu */
+ boot_cpu = cpu_logical_map(0);
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
+ l1_writel(~val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+
+ return 0;
+}
+
+static void bcm7038_l1_resume(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+
+ boot_cpu = cpu_logical_map(0);
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+}
+
+static struct syscore_ops bcm7038_l1_syscore_ops = {
+ .suspend = bcm7038_l1_suspend,
+ .resume = bcm7038_l1_resume,
+};
+
+static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (on)
+ intc->wake_mask[word] |= mask;
+ else
+ intc->wake_mask[word] &= ~mask;
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ return 0;
+}
+#endif
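bcm7038_l1_set_wake() is pure bookkeeping: derive the word and bit from the hwirq, then set or clear that bit in wake_mask under the lock; the suspend hook above then ORs it with irq_fwd_mask. The index arithmetic by itself:

    #include <stdint.h>
    #include <stdio.h>

    #define IRQS_PER_WORD 32

    int main(void)
    {
            uint32_t wake_mask[4] = { 0 };
            unsigned int hwirq = 37;                        /* example interrupt */

            unsigned int word = hwirq / IRQS_PER_WORD;      /* -> word 1 */
            uint32_t mask = 1U << (hwirq % IRQS_PER_WORD);  /* -> bit 5 */

            wake_mask[word] |= mask;                        /* the "on" case */
            printf("wake_mask[%u]=%#x\n", word, wake_mask[word]); /* 0x20 */
            return 0;
    }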
+
static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask,
@@ -295,11 +390,21 @@ static struct irq_chip bcm7038_l1_irq_chip = {
#ifdef CONFIG_SMP
.irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
+#ifdef CONFIG_PM_SLEEP
+ .irq_set_wake = bcm7038_l1_set_wake,
+#endif
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq)
{
+ struct bcm7038_l1_chip *intc = d->host_data;
+ u32 mask = BIT(hw_irq % IRQS_PER_WORD);
+ u32 word = hw_irq / IRQS_PER_WORD;
+
+ if (intc->irq_fwd_mask[word] & mask)
+ return -EPERM;
+
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
@@ -340,6 +445,16 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
goto out_unmap;
}
+#ifdef CONFIG_PM_SLEEP
+ /* Add bcm7038_l1_chip into a list */
+ raw_spin_lock(&bcm7038_l1_intcs_lock);
+ list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
+ raw_spin_unlock(&bcm7038_l1_intcs_lock);
+
+ if (list_is_singular(&bcm7038_l1_intcs_list))
+ register_syscore_ops(&bcm7038_l1_syscore_ops);
+#endif
+
pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
dn, IRQS_PER_WORD * intc->n_words);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index e88e75c22b6a..fbec07d634ad 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 229d586c3d7a..87711e0f8014 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -5,6 +5,7 @@
*/
#include <linux/acpi_iort.h>
+#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 787e8eec9a7f..e05673bcd52b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
@@ -102,20 +103,21 @@ struct its_node {
struct its_collection *collections;
struct fwnode_handle *fwnode_handle;
u64 (*get_msi_base)(struct its_device *its_dev);
+ u64 typer;
u64 cbaser_save;
u32 ctlr_save;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
- u32 ite_size;
- u32 device_ids;
int numa_node;
unsigned int msi_domain_flags;
u32 pre_its_base; /* for Socionext Synquacer */
- bool is_v4;
int vlpi_redist_offset;
};
+#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
+
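Everything the driver used to cache separately (ite_size, device_ids, is_v4) is now derived on demand from the saved GITS_TYPER. A userspace re-creation of the FIELD_GET() arithmetic; GENMASK_ULL/FIELD_GET are re-implemented here since linux/bitfield.h is kernel-only, with mask positions per the GICv3 spec:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
            (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
    #define FIELD_GET(mask, val) \
            (((val) & (mask)) >> __builtin_ctzll(mask))

    #define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)  /* DeviceID bits - 1 */
    #define GITS_TYPER_VLPIS   (1ULL << 1)

    int main(void)
    {
            uint64_t typer = (19ULL << 13) | GITS_TYPER_VLPIS; /* hypothetical ITS */

            printf("v4=%d device_ids=%llu\n",
                   !!(typer & GITS_TYPER_VLPIS),
                   (unsigned long long)(FIELD_GET(GITS_TYPER_DEVBITS, typer) + 1));
            /* prints: v4=1 device_ids=20 */
            return 0;
    }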
#define ITS_ITT_ALIGN SZ_256
/* The maximum number of VPEID bits supported by VLPI commands */
@@ -130,7 +132,7 @@ struct event_lpi_map {
u16 *col_map;
irq_hw_number_t lpi_base;
int nr_lpis;
- struct mutex vlpi_lock;
+ raw_spinlock_t vlpi_lock;
struct its_vm *vm;
struct its_vlpi_map *vlpi_maps;
int nr_vlpis;
@@ -181,7 +183,7 @@ static u16 get_its_list(struct its_vm *vm)
unsigned long its_list = 0;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (vm->vlpi_count[its->list_nr])
@@ -191,6 +193,12 @@ static u16 get_its_list(struct its_vm *vm)
return (u16)its_list;
}
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->event_map.lpi_base;
+}
+
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event)
{
@@ -199,6 +207,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
return its->collections + its_dev->event_map.col_map[event];
}
+static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
+ u32 event)
+{
+ if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
+ return NULL;
+
+ return &its_dev->event_map.vlpi_maps[event];
+}
+
+static struct its_collection *irq_to_col(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ return dev_event_to_col(its_dev, its_get_event_id(d));
+}
+
static struct its_collection *valid_col(struct its_collection *col)
{
if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
@@ -305,7 +329,10 @@ struct its_cmd_desc {
* The ITS command block, which is what the ITS actually parses.
*/
struct its_cmd_block {
- u64 raw_cmd[4];
+ union {
+ u64 raw_cmd[4];
+ __le64 raw_cmd_le[4];
+ };
};
#define ITS_CMD_QUEUE_SZ SZ_64K
@@ -414,10 +441,10 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
- cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
- cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
- cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
- cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+ cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
+ cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
+ cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
+ cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
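The union buys type-safety, not behavior: raw_cmd_le[] overlays the same storage but carries the __le64 annotation, so sparse can verify that only byte-swapped values are stored while the generated code is unchanged. A userspace analogue, with glibc's htole64() standing in for cpu_to_le64():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    union cmd_block {
            uint64_t raw[4];      /* CPU-endian scratch while building */
            uint64_t raw_le[4];   /* __le64 in the kernel; plain u64 here */
    };

    int main(void)
    {
            union cmd_block c = { .raw = { 0x0807060504030201ULL } };

            /* its_fixup_cmd(): swap each dword in place through the LE view */
            c.raw_le[0] = htole64(c.raw[0]);

            printf("%#llx\n", (unsigned long long)c.raw_le[0]);
            /* little-endian host: unchanged; big-endian host: byte-swapped */
            return 0;
    }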
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
@@ -676,6 +703,60 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
+static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vint_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
+ desc->its_int_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INT);
+ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
+ desc->its_clear_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_CLEAR);
+ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -953,7 +1034,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
static void its_send_vmapti(struct its_device *dev, u32 id)
{
- struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
struct its_cmd_desc desc;
desc.its_vmapti_cmd.vpe = map->vpe;
@@ -967,7 +1048,7 @@ static void its_send_vmapti(struct its_device *dev, u32 id)
static void its_send_vmovi(struct its_device *dev, u32 id)
{
- struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
struct its_cmd_desc desc;
desc.its_vmovi_cmd.vpe = map->vpe;
@@ -1021,7 +1102,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
/* Emit VMOVPs */
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (!vpe->its_vm->vlpi_count[its->list_nr])
@@ -1042,29 +1123,71 @@ static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
+static void its_send_vinv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINV command. This is just a normal INV,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
+}
+
+static void its_send_vint(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINT command. This is just a normal INT,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_int_cmd.dev = dev;
+ desc.its_int_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
+}
+
+static void its_send_vclear(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VCLEAR command. This is just a normal CLEAR,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_clear_cmd.dev = dev;
+ desc.its_clear_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
+}
+
/*
* irqchip functions - assumes MSI, mostly.
*/
-
-static inline u32 its_get_event_id(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- return d->hwirq - its_dev->event_map.lpi_base;
+ u32 event = its_get_event_id(d);
+
+ if (!irqd_is_forwarded_to_vcpu(d))
+ return NULL;
+
+ return dev_event_to_vlpi_map(its_dev, event);
}
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
+ struct its_vlpi_map *map = get_vlpi_map(d);
irq_hw_number_t hwirq;
void *va;
u8 *cfg;
- if (irqd_is_forwarded_to_vcpu(d)) {
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
- struct its_vlpi_map *map;
-
- va = page_address(its_dev->event_map.vm->vprop_page);
- map = &its_dev->event_map.vlpi_maps[event];
+ if (map) {
+ va = page_address(map->vm->vprop_page);
hwirq = map->vintid;
/* Remember the updated property */
@@ -1090,23 +1213,50 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
dsb(ishst);
}
+static void wait_for_syncr(void __iomem *rdbase)
+{
+ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+}
+
+static void direct_lpi_inv(struct irq_data *d)
+{
+ struct its_collection *col;
+ void __iomem *rdbase;
+
+ /* Target the redistributor this LPI is currently routed to */
+ col = irq_to_col(d);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
+ gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+
+ wait_for_syncr(rdbase);
+}
+
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
lpi_write_config(d, clr, set);
- its_send_inv(its_dev, its_get_event_id(d));
+ if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+ direct_lpi_inv(d);
+ else if (!irqd_is_forwarded_to_vcpu(d))
+ its_send_inv(its_dev, its_get_event_id(d));
+ else
+ its_send_vinv(its_dev, its_get_event_id(d));
}
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
- if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+ map = dev_event_to_vlpi_map(its_dev, event);
+
+ if (map->db_enabled == enable)
return;
- its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+ map->db_enabled = enable;
/*
* More fun with the architecture:
@@ -1208,10 +1358,17 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
- if (state)
- its_send_int(its_dev, event);
- else
- its_send_clear(its_dev, event);
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ if (state)
+ its_send_vint(its_dev, event);
+ else
+ its_send_vclear(its_dev, event);
+ } else {
+ if (state)
+ its_send_int(its_dev, event);
+ else
+ its_send_clear(its_dev, event);
+ }
return 0;
}
@@ -1279,13 +1436,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
if (!info->map)
return -EINVAL;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!maps) {
ret = -ENOMEM;
goto out;
@@ -1328,29 +1485,30 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
}
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
int ret = 0;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ map = get_vlpi_map(d);
- if (!its_dev->event_map.vm ||
- !its_dev->event_map.vlpi_maps[event].vm) {
+ if (!its_dev->event_map.vm || !map) {
ret = -EINVAL;
goto out;
}
/* Copy our mapping information to the incoming request */
- *info->map = its_dev->event_map.vlpi_maps[event];
+ *info->map = *map;
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
@@ -1360,7 +1518,7 @@ static int its_vlpi_unmap(struct irq_data *d)
u32 event = its_get_event_id(d);
int ret = 0;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
ret = -EINVAL;
@@ -1390,7 +1548,7 @@ static int its_vlpi_unmap(struct irq_data *d)
}
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
@@ -1416,7 +1574,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
struct its_cmd_info *info = vcpu_info;
/* Need a v4 ITS */
- if (!its_dev->its->is_v4)
+ if (!is_v4(its_dev->its))
return -EINVAL;
/* Unmap request? */
@@ -1922,9 +2080,9 @@ static bool its_parse_indirect_baser(struct its_node *its,
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
- pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type],
- its->device_ids, ids);
+ device_ids(its), ids);
}
*order = new_order;
@@ -1970,7 +2128,7 @@ static int its_alloc_tables(struct its_node *its)
case GITS_BASER_TYPE_DEVICE:
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
- its->device_ids);
+ device_ids(its));
break;
case GITS_BASER_TYPE_VCPU:
@@ -2361,7 +2519,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Don't allow device id that exceeds ITS hardware limit */
if (!baser)
- return (ilog2(dev_id) < its->device_ids);
+ return (ilog2(dev_id) < device_ids(its));
return its_alloc_table_entry(its, baser, dev_id);
}
@@ -2380,7 +2538,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
list_for_each_entry(its, &its_nodes, entry) {
struct its_baser *baser;
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
@@ -2419,7 +2577,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* sized as a power of two (and you need at least one bit...).
*/
nr_ites = max(2, nvecs);
- sz = nr_ites * its->ite_size;
+ sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
@@ -2450,7 +2608,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis;
- mutex_init(&dev->event_map.vlpi_lock);
+ raw_spin_lock_init(&dev->event_map.vlpi_lock);
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -2471,6 +2629,7 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
+ kfree(its_dev->event_map.col_map);
kfree(its_dev->itt);
kfree(its_dev);
}
@@ -2679,7 +2838,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
its_dev->event_map.nr_lpis);
- kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
@@ -2772,8 +2930,7 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ wait_for_syncr(rdbase);
return;
}
@@ -2869,7 +3026,7 @@ static void its_vpe_invall(struct its_vpe *vpe)
struct its_node *its;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
@@ -2927,10 +3084,10 @@ static void its_vpe_send_inv(struct irq_data *d)
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
+ /* Target the redistributor this VPE is currently known on */
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
- gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
+ wait_for_syncr(rdbase);
} else {
its_vpe_send_cmd(vpe, its_send_inv);
}
@@ -2972,8 +3129,7 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
} else {
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ wait_for_syncr(rdbase);
}
} else {
if (state)
@@ -3138,7 +3294,7 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
vpe->col_idx = cpumask_first(cpu_online_mask);
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
its_send_vmapp(its, vpe, true);
@@ -3164,7 +3320,7 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
return;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
its_send_vmapp(its, vpe, false);
@@ -3215,8 +3371,9 @@ static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
struct its_node *its = data;
- /* erratum 22375: only alloc 8MB table size */
- its->device_ids = 0x14; /* 20 bits, 8MB */
+ /* erratum 22375: only alloc 8MB table size (20 bits) */
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
return true;
@@ -3236,7 +3393,8 @@ static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */
- its->ite_size = 16;
+ its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
+ its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
return true;
}
@@ -3270,8 +3428,10 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
its->get_msi_base = its_irq_get_msi_base_pre_its;
ids = ilog2(pre_its_window[1]) - 2;
- if (its->device_ids > ids)
- its->device_ids = ids;
+ if (device_ids(its) > ids) {
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
+ }
/* the pre-ITS breaks isolation, so disable MSI remapping */
its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
@@ -3504,7 +3664,7 @@ static int its_init_vpe_domain(void)
}
/* Use the last possible DevID */
- devid = GENMASK(its->device_ids - 1, 0);
+ devid = GENMASK(device_ids(its) - 1, 0);
vpe_proxy.dev = its_create_device(its, devid, entries, false);
if (!vpe_proxy.dev) {
kfree(vpe_proxy.vpes);
@@ -3602,12 +3762,10 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
+ its->typer = typer;
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
- its->device_ids = GITS_TYPER_DEVBITS(typer);
- its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
- if (its->is_v4) {
+ if (is_v4(its)) {
if (!(typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(res, its_base);
if (err < 0)
@@ -3674,7 +3832,7 @@ static int __init its_probe_one(struct resource *res,
gits_write_cwriter(0, its->base + GITS_CWRITER);
ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr |= GITS_CTLR_ENABLE;
- if (its->is_v4)
+ if (is_v4(its))
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
@@ -3999,7 +4157,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return err;
list_for_each_entry(its, &its_nodes, entry)
- has_v4 |= its->is_v4;
+ has_v4 |= is_v4(its);
if (has_v4 & rdists->has_vlpis) {
if (its_init_vpe_domain() ||
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6bb1f682f78b..d6218012097b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -183,7 +183,7 @@ static void gic_do_wait_for_rwp(void __iomem *base)
}
cpu_relax();
udelay(1);
- };
+ }
}
/* Wait for completion of a distributor change */
@@ -240,7 +240,7 @@ static void gic_enable_redist(bool enable)
break;
cpu_relax();
udelay(1);
- };
+ }
if (!count)
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index f126255b3260..01d18b39069e 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 platform IRQ support
+ * Ingenic XBurst platform IRQ support
*/
#include <linux/errno.h>
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/ingenic.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/timex.h>
@@ -22,6 +21,7 @@
struct ingenic_intc_data {
void __iomem *base;
+ struct irq_domain *domain;
unsigned num_chips;
};
@@ -35,41 +35,30 @@ struct ingenic_intc_data {
static irqreturn_t intc_cascade(int irq, void *data)
{
struct ingenic_intc_data *intc = irq_get_handler_data(irq);
- uint32_t irq_reg;
+ struct irq_domain *domain = intc->domain;
+ struct irq_chip_generic *gc;
+ uint32_t pending;
unsigned i;
for (i = 0; i < intc->num_chips; i++) {
- irq_reg = readl(intc->base + (i * CHIP_SIZE) +
- JZ_REG_INTC_PENDING);
- if (!irq_reg)
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING);
+ if (!pending)
continue;
- generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE);
+ while (pending) {
+ int bit = __fls(pending);
+
+ irq = irq_find_mapping(domain, bit + (i * 32));
+ generic_handle_irq(irq);
+ pending &= ~BIT(bit);
+ }
}
return IRQ_HANDLED;
}
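The rewritten cascade handler services every pending bit per chip instead of only the highest one per invocation. The bit-walk, extracted into a standalone form with __fls() modeled via __builtin_clz():

    #include <stdint.h>
    #include <stdio.h>

    static int fls_bit(uint32_t x)      /* kernel __fls(): highest set bit */
    {
            return 31 - __builtin_clz(x);
    }

    int main(void)
    {
            uint32_t pending = (1u << 16) | (1u << 4) | (1u << 1);

            while (pending) {
                    int bit = fls_bit(pending);

                    printf("handle hwirq %d\n", bit);  /* 16, then 4, then 1 */
                    pending &= ~(1u << bit);
            }
            return 0;
    }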
-static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
-{
- struct irq_chip_regs *regs = &gc->chip_types->regs;
-
- writel(mask, gc->reg_base + regs->enable);
- writel(~mask, gc->reg_base + regs->disable);
-}
-
-void ingenic_intc_irq_suspend(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- intc_irq_set_mask(gc, gc->wake_active);
-}
-
-void ingenic_intc_irq_resume(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- intc_irq_set_mask(gc, gc->mask_cache);
-}
-
static struct irqaction intc_cascade_action = {
.handler = intc_cascade,
.name = "SoC intc cascade interrupt",
@@ -108,17 +97,27 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- for (i = 0; i < num_chips; i++) {
- /* Mask all irqs */
- writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
- JZ_REG_INTC_SET_MASK);
+ domain = irq_domain_add_legacy(node, num_chips * 32,
+ JZ4740_IRQ_BASE, 0,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
- gc = irq_alloc_generic_chip("INTC", 1,
- JZ4740_IRQ_BASE + (i * 32),
- intc->base + (i * CHIP_SIZE),
- handle_level_irq);
+ intc->domain = domain;
+
+ err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (err)
+ goto out_domain_remove;
+
+ for (i = 0; i < num_chips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
gc->wake_enabled = IRQ_MSK(32);
+ gc->reg_base = intc->base + (i * CHIP_SIZE);
ct = gc->chip_types;
ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
@@ -127,21 +126,19 @@ static int __init ingenic_intc_of_init(struct device_node *node,
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_wake = irq_gc_set_wake;
- ct->chip.irq_suspend = ingenic_intc_irq_suspend;
- ct->chip.irq_resume = ingenic_intc_irq_resume;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0,
- IRQ_NOPROBE | IRQ_LEVEL);
+ /* Mask all irqs */
+ irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
- &irq_domain_simple_ops, NULL);
- if (!domain)
- pr_warn("unable to register IRQ domain\n");
-
setup_irq(parent_irq, &intc_cascade_action);
return 0;
+out_domain_remove:
+ irq_domain_remove(domain);
+out_unmap_base:
+ iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:
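The reworked cascade handler above drains every pending source per invocation instead of servicing only the highest bit and returning. A minimal standalone sketch of that drain order, using __builtin_clz() in place of the kernel's __fls(); purely illustrative and not part of the patch:

	#include <stdio.h>

	/* Service a 32-bit pending mask from the highest set bit down,
	 * mirroring the while (pending) loop in intc_cascade() above. */
	static void drain(unsigned int pending)
	{
		while (pending) {
			int bit = 31 - __builtin_clz(pending);	/* like __fls() */

			printf("servicing hwirq %d\n", bit);
			pending &= ~(1u << bit);
		}
	}

	int main(void)
	{
		drain(0x41);	/* services hwirq 6, then hwirq 0 */
		return 0;
	}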
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
new file mode 100644
index 000000000000..4d1179fed77c
--- /dev/null
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "irq-ls-extirq: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MAXIRQ 12
+#define LS1021A_SCFGREVCR 0x200
+
+struct ls_extirq_data {
+ struct regmap *syscon;
+ u32 intpcr;
+ bool bit_reverse;
+ u32 nirq;
+ struct irq_fwspec map[MAXIRQ];
+};
+
+static int
+ls_extirq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct ls_extirq_data *priv = data->chip_data;
+ irq_hw_number_t hwirq = data->hwirq;
+ u32 value, mask;
+
+ if (priv->bit_reverse)
+ mask = 1U << (31 - hwirq);
+ else
+ mask = 1U << hwirq;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ value = mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ value = mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ regmap_update_bits(priv->syscon, priv->intpcr, mask, value);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip ls_extirq_chip = {
+ .name = "ls-extirq",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = ls_extirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int
+ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct ls_extirq_data *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= priv->nirq)
+ return -EINVAL;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip,
+ priv);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]);
+}
+
+static const struct irq_domain_ops extirq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .alloc = ls_extirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int
+ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
+{
+ const __be32 *map;
+ u32 mapsize;
+ int ret;
+
+ map = of_get_property(node, "interrupt-map", &mapsize);
+ if (!map)
+ return -ENOENT;
+ if (mapsize % sizeof(*map))
+ return -EINVAL;
+ mapsize /= sizeof(*map);
+
+ while (mapsize) {
+ struct device_node *ipar;
+ u32 hwirq, intsize, j;
+
+ if (mapsize < 3)
+ return -EINVAL;
+ hwirq = be32_to_cpup(map);
+ if (hwirq >= MAXIRQ)
+ return -EINVAL;
+ priv->nirq = max(priv->nirq, hwirq + 1);
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(map + 2));
+ map += 3;
+ mapsize -= 3;
+ if (!ipar)
+ return -EINVAL;
+ priv->map[hwirq].fwnode = &ipar->fwnode;
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ if (ret)
+ return ret;
+
+ if (intsize > mapsize)
+ return -EINVAL;
+
+ priv->map[hwirq].param_count = intsize;
+ for (j = 0; j < intsize; ++j)
+ priv->map[hwirq].param[j] = be32_to_cpup(map++);
+ mapsize -= intsize;
+ }
+ return 0;
+}
+
+static int __init
+ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct ls_extirq_data *priv;
+ int ret;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Cannot find parent domain\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->syscon = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(priv->syscon)) {
+ ret = PTR_ERR(priv->syscon);
+ pr_err("Failed to lookup parent regmap\n");
+ goto out;
+ }
+ ret = of_property_read_u32(node, "reg", &priv->intpcr);
+ if (ret) {
+ pr_err("Missing INTPCR offset value\n");
+ goto out;
+ }
+
+ ret = ls_extirq_parse_map(priv, node);
+ if (ret)
+ goto out;
+
+ if (of_device_is_compatible(node, "fsl,ls1021a-extirq")) {
+ u32 revcr;
+
+ ret = regmap_read(priv->syscon, LS1021A_SCFGREVCR, &revcr);
+ if (ret)
+ goto out;
+ priv->bit_reverse = (revcr != 0);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
+ &extirq_domain_ops, priv);
+ if (!domain)
+ ret = -ENOMEM;
+
+out:
+ if (ret)
+ kfree(priv);
+ return ret;
+}
+
+IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);
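The set_type handling above relies on the fact that each external pin can be polarity-inverted through the syscon INTPCR register: active-low and falling-edge requests are flipped there, so the parent GIC only ever sees active-high or rising-edge triggers. A hedged sketch of the pure mapping; the enum values stand in for the kernel's IRQ_TYPE_* constants:

	#include <stdbool.h>
	#include <errno.h>

	enum { EDGE_RISING = 1, EDGE_FALLING = 2,
	       LEVEL_HIGH = 4, LEVEL_LOW = 8 };

	/* Return the trigger type to hand to the parent; set *invert when
	 * the INTPCR polarity bit must be flipped for this pin. */
	static int map_type(int type, bool *invert)
	{
		switch (type) {
		case LEVEL_LOW:
			*invert = true;
			return LEVEL_HIGH;
		case EDGE_FALLING:
			*invert = true;
			return EDGE_RISING;
		case LEVEL_HIGH:
		case EDGE_RISING:
			*invert = false;
			return type;
		default:
			return -EINVAL;
		}
	}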
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index ef4d625d2d80..8f6e6b08eadf 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -246,8 +246,8 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d
/* No free bits available. Allocate a new vint */
vint_desc = ti_sci_inta_alloc_parent_irq(domain);
if (IS_ERR(vint_desc)) {
- mutex_unlock(&inta->vint_mutex);
- return ERR_PTR(PTR_ERR(vint_desc));
+ event_desc = ERR_CAST(vint_desc);
+ goto unlock;
}
free_bit = find_first_zero_bit(vint_desc->event_map,
@@ -259,6 +259,7 @@ alloc_event:
if (IS_ERR(event_desc))
clear_bit(free_bit, vint_desc->event_map);
+unlock:
mutex_unlock(&inta->vint_mutex);
return event_desc;
}
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 5a7efeb3892d..84163f1ebfcf 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -51,7 +51,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
handle_domain_irq(zevio_irq_domain, irqnr, regs);
- };
+ }
}
static void __init zevio_init_irq_base(void __iomem *base)
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index faa7d61b9d6c..6ae9e1f0819d 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
@@ -13,12 +14,13 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
-#define PDC_MAX_IRQS 126
+#define PDC_MAX_IRQS 168
+#define PDC_MAX_GPIO_IRQS 256
#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
@@ -26,6 +28,8 @@
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
+#define PDC_NO_PARENT_IRQ ~0UL
+
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
@@ -47,6 +51,26 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ return irq_chip_get_parent_state(d, which, state);
+}
+
+static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool value)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ return irq_chip_set_parent_state(d, which, value);
+}
+
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
@@ -63,15 +87,37 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
raw_spin_unlock(&pdc_lock);
}
-static void qcom_pdc_gic_mask(struct irq_data *d)
+static void qcom_pdc_gic_disable(struct irq_data *d)
{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
pdc_enable_intr(d, false);
+ irq_chip_disable_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
+ pdc_enable_intr(d, true);
+ irq_chip_enable_parent(d);
+}
+
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
irq_chip_mask_parent(d);
}
static void qcom_pdc_gic_unmask(struct irq_data *d)
{
- pdc_enable_intr(d, true);
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
irq_chip_unmask_parent(d);
}
@@ -114,6 +160,9 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
int pin_out = d->hwirq;
enum pdc_irq_config_bits pdc_type;
+ if (pin_out == GPIO_NO_WAKE_IRQ)
+ return 0;
+
switch (type) {
case IRQ_TYPE_EDGE_RISING:
pdc_type = PDC_EDGE_RISING;
@@ -148,6 +197,10 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_eoi = irq_chip_eoi_parent,
.irq_mask = qcom_pdc_gic_mask,
.irq_unmask = qcom_pdc_gic_unmask,
+ .irq_disable = qcom_pdc_gic_disable,
+ .irq_enable = qcom_pdc_gic_enable,
+ .irq_get_irqchip_state = qcom_pdc_gic_get_irqchip_state,
+ .irq_set_irqchip_state = qcom_pdc_gic_set_irqchip_state,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = qcom_pdc_gic_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND |
@@ -169,8 +222,7 @@ static irq_hw_number_t get_parent_hwirq(int pin)
return (region->parent_base + pin - region->pin_base);
}
- WARN_ON(1);
- return ~0UL;
+ return PDC_NO_PARENT_IRQ;
}
static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
@@ -199,17 +251,17 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
if (ret)
- return -EINVAL;
-
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == ~0UL)
- return -EINVAL;
+ return ret;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&qcom_pdc_gic_chip, NULL);
if (ret)
return ret;
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ return 0;
+
if (type & IRQ_TYPE_EDGE_BOTH)
type = IRQ_TYPE_EDGE_RISING;
@@ -232,6 +284,60 @@ static const struct irq_domain_ops qcom_pdc_ops = {
.free = irq_domain_free_irqs_common,
};
+static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq, parent_hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &qcom_pdc_gic_chip, NULL);
+ if (ret)
+ return ret;
+
+ if (hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ return 0;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = parent_hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ return bus_token == DOMAIN_BUS_WAKEUP;
+}
+
+static const struct irq_domain_ops qcom_pdc_gpio_ops = {
+ .select = qcom_pdc_gpio_domain_select,
+ .alloc = qcom_pdc_gpio_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n;
@@ -270,7 +376,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *parent_domain, *pdc_domain;
+ struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
int ret;
pdc_base = of_iomap(node, 0);
@@ -301,12 +407,27 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail;
}
+ pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
+ PDC_MAX_GPIO_IRQS,
+ of_fwnode_handle(node),
+ &qcom_pdc_gpio_ops, NULL);
+ if (!pdc_gpio_domain) {
+ pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
+ ret = -ENOMEM;
+ goto remove;
+ }
+
+ irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+
return 0;
+remove:
+ irq_domain_remove(pdc_domain);
fail:
kfree(pdc_region);
iounmap(pdc_base);
return ret;
}
-IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
+IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
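With the second domain registered above, qcom_pdc_gpio_domain_select() makes it visible only to lookups that ask for DOMAIN_BUS_WAKEUP. A hedged sketch of how a consumer such as a pinctrl driver might locate it; 'np' and 'pin' are hypothetical:

	struct irq_fwspec fwspec = {
		.fwnode      = of_node_to_fwnode(np),	/* hypothetical DT node */
		.param_count = 2,
		.param       = { pin, IRQ_TYPE_LEVEL_HIGH },
	};
	struct irq_domain *wakeup_parent =
		irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_WAKEUP);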
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2ae0c1913766..0a2cc197f62b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1954,12 +1954,14 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
/*
* For a zoned target, the number of zones should be updated for the
* correct value to be exposed in sysfs queue/nr_zones. For a BIO based
- * target, this is all that is needed. For a request based target, the
- * queue zone bitmaps must also be updated.
- * Use blk_revalidate_disk_zones() to handle this.
+ * target, this is all that is needed.
*/
- if (blk_queue_is_zoned(q))
- blk_revalidate_disk_zones(t->md->disk);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(q)) {
+ WARN_ON_ONCE(queue_is_mq(q));
+ q->nr_zones = blkdev_nr_zones(t->md->disk);
+ }
+#endif
/* Allow reads to exceed readahead limits */
q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 4574e0dedbd6..70a1063161c0 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -727,7 +727,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
- dev->nr_zones = blkdev_nr_zones(dev->bdev);
+ dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
dmz->dev = dev;
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 0322df9dc249..14386d0b5f57 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EBI driver for Atmel chips
* inspired by the fsl weim bus driver
*
* Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -19,6 +16,8 @@
#include <linux/regmap.h>
#include <soc/at91/atmel-sfr.h>
+#define AT91_EBI_NUM_CS 8
+
struct atmel_ebi_dev_config {
int cs;
struct atmel_smc_cs_conf smcconf;
@@ -314,7 +313,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
if (ret)
return ret;
- if (cs >= AT91_MATRIX_EBI_NUM_CS ||
+ if (cs >= AT91_EBI_NUM_CS ||
!(ebi->caps->available_cs & BIT(cs))) {
dev_err(dev, "invalid reg property in %pOF\n", np);
return -EINVAL;
@@ -344,7 +343,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
apply = true;
i = 0;
- for_each_set_bit(cs, &cslines, AT91_MATRIX_EBI_NUM_CS) {
+ for_each_set_bit(cs, &cslines, AT91_EBI_NUM_CS) {
ebid->configs[i].cs = cs;
if (apply) {
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 6827ed484750..82b415be18d1 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -127,7 +127,6 @@ enum dpfe_msg_fields {
MSG_COMMAND,
MSG_ARG_COUNT,
MSG_ARG0,
- MSG_CHKSUM,
MSG_FIELD_MAX = 16 /* Max number of arguments */
};
@@ -180,7 +179,7 @@ struct dpfe_api {
};
/* Things we need for as long as we are active. */
-struct private_data {
+struct brcmstb_dpfe_priv {
void __iomem *regs;
void __iomem *dmem;
void __iomem *imem;
@@ -232,9 +231,13 @@ static struct attribute *dpfe_v3_attrs[] = {
};
ATTRIBUTE_GROUPS(dpfe_v3);
-/* API v2 firmware commands */
-static const struct dpfe_api dpfe_api_v2 = {
- .version = 2,
+/*
+ * Old API v2 firmware commands, as defined in the rev 0.61 specification.
+ * We set the version to 1 to denote that it is not compatible with the new
+ * API v2 and onwards.
+ */
+static const struct dpfe_api dpfe_api_old_v2 = {
+ .version = 1,
.fw_name = "dpfe.bin",
.sysfs_attrs = dpfe_v2_groups,
.command = {
@@ -243,21 +246,42 @@ static const struct dpfe_api dpfe_api_v2 = {
[MSG_COMMAND] = 1,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 1,
- [MSG_CHKSUM] = 4,
},
[DPFE_CMD_GET_REFRESH] = {
[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
[MSG_COMMAND] = 2,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 1,
- [MSG_CHKSUM] = 5,
},
[DPFE_CMD_GET_VENDOR] = {
[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
[MSG_COMMAND] = 2,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 2,
- [MSG_CHKSUM] = 6,
+ },
+ }
+};
+
+/*
+ * API v2 firmware commands, as defined in the rev 0.8 specification; named
+ * "new v2" here.
+ */
+static const struct dpfe_api dpfe_api_new_v2 = {
+ .version = 2,
+ .fw_name = NULL, /* We expect the firmware to have been downloaded! */
+ .sysfs_attrs = dpfe_v2_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x101,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x201,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x202,
},
}
};
@@ -273,49 +297,51 @@ static const struct dpfe_api dpfe_api_v3 = {
[MSG_COMMAND] = 0x0101,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 1,
- [MSG_CHKSUM] = 0x104,
},
[DPFE_CMD_GET_REFRESH] = {
[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
[MSG_COMMAND] = 0x0202,
[MSG_ARG_COUNT] = 0,
- /*
- * This is a bit ugly. Without arguments, the checksum
- * follows right after the argument count and not at
- * offset MSG_CHKSUM.
- */
- [MSG_ARG0] = 0x203,
},
/* There's no GET_VENDOR command in API v3. */
},
};
-static bool is_dcpu_enabled(void __iomem *regs)
+static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
{
u32 val;
- val = readl_relaxed(regs + REG_DCPU_RESET);
+ mutex_lock(&priv->lock);
+ val = readl_relaxed(priv->regs + REG_DCPU_RESET);
+ mutex_unlock(&priv->lock);
return !(val & DCPU_RESET_MASK);
}
-static void __disable_dcpu(void __iomem *regs)
+static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
{
u32 val;
- if (!is_dcpu_enabled(regs))
+ if (!is_dcpu_enabled(priv))
return;
+ mutex_lock(&priv->lock);
+
/* Put DCPU in reset if it's running. */
- val = readl_relaxed(regs + REG_DCPU_RESET);
+ val = readl_relaxed(priv->regs + REG_DCPU_RESET);
val |= (1 << DCPU_RESET_SHIFT);
- writel_relaxed(val, regs + REG_DCPU_RESET);
+ writel_relaxed(val, priv->regs + REG_DCPU_RESET);
+
+ mutex_unlock(&priv->lock);
}
-static void __enable_dcpu(void __iomem *regs)
+static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
{
+ void __iomem *regs = priv->regs;
u32 val;
+ mutex_lock(&priv->lock);
+
/* Clear mailbox registers. */
writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
writel_relaxed(0, regs + REG_TO_HOST_MBOX);
@@ -329,6 +355,8 @@ static void __enable_dcpu(void __iomem *regs)
val = readl_relaxed(regs + REG_DCPU_RESET);
val &= ~(1 << DCPU_RESET_SHIFT);
writel_relaxed(val, regs + REG_DCPU_RESET);
+
+ mutex_unlock(&priv->lock);
}
static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
@@ -343,7 +371,7 @@ static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
return sum;
}
-static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
+static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
char *buf, ssize_t *size)
{
unsigned int msg_type;
@@ -382,7 +410,7 @@ static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
return ptr;
}
-static void __finalize_command(struct private_data *priv)
+static void __finalize_command(struct brcmstb_dpfe_priv *priv)
{
unsigned int release_mbox;
@@ -390,12 +418,12 @@ static void __finalize_command(struct private_data *priv)
* It depends on the API version which MBOX register we have to write to
* to signal we are done.
*/
- release_mbox = (priv->dpfe_api->version < 3)
+ release_mbox = (priv->dpfe_api->version < 2)
? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
writel_relaxed(0, priv->regs + release_mbox);
}
-static int __send_command(struct private_data *priv, unsigned int cmd,
+static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
u32 result[])
{
const u32 *msg = priv->dpfe_api->command[cmd];
@@ -421,9 +449,17 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
return -ETIMEDOUT;
}
+ /* Compute checksum over the message */
+ chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
+ chksum = get_msg_chksum(msg, chksum_idx);
+
/* Write command and arguments to message area */
- for (i = 0; i < MSG_FIELD_MAX; i++)
- writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+ for (i = 0; i < MSG_FIELD_MAX; i++) {
+ if (i == chksum_idx)
+ writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
+ else
+ writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+ }
/* Tell DCPU there is a command waiting */
writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
@@ -517,7 +553,7 @@ static int __verify_firmware(struct init_data *init,
/* Verify checksum by reading back the firmware from co-processor RAM. */
static int __verify_fw_checksum(struct init_data *init,
- struct private_data *priv,
+ struct brcmstb_dpfe_priv *priv,
const struct dpfe_firmware_header *header,
u32 checksum)
{
@@ -571,26 +607,23 @@ static int __write_firmware(u32 __iomem *mem, const u32 *fw,
return 0;
}
-static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
- struct init_data *init)
+static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
{
const struct dpfe_firmware_header *header;
unsigned int dmem_size, imem_size;
- struct device *dev = &pdev->dev;
+ struct device *dev = priv->dev;
bool is_big_endian = false;
- struct private_data *priv;
const struct firmware *fw;
const u32 *dmem, *imem;
+ struct init_data init;
const void *fw_blob;
int ret;
- priv = platform_get_drvdata(pdev);
-
/*
* Skip downloading the firmware if the DCPU is already running and
* responding to commands.
*/
- if (is_dcpu_enabled(priv->regs)) {
+ if (is_dcpu_enabled(priv)) {
u32 response[MSG_FIELD_MAX];
ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
@@ -606,20 +639,23 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
if (!priv->dpfe_api->fw_name)
return -ENODEV;
- ret = request_firmware(&fw, priv->dpfe_api->fw_name, dev);
- /* request_firmware() prints its own error messages. */
+ ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
+ /*
+ * Defer the firmware download if the firmware file couldn't be found.
+ * The root file system may not be available yet.
+ */
if (ret)
- return ret;
+ return (ret == -ENOENT) ? -EPROBE_DEFER : ret;
- ret = __verify_firmware(init, fw);
+ ret = __verify_firmware(&init, fw);
if (ret)
return -EFAULT;
- __disable_dcpu(priv->regs);
+ __disable_dcpu(priv);
- is_big_endian = init->is_big_endian;
- dmem_size = init->dmem_len;
- imem_size = init->imem_len;
+ is_big_endian = init.is_big_endian;
+ dmem_size = init.dmem_len;
+ imem_size = init.imem_len;
/* At the beginning of the firmware blob is a header. */
header = (struct dpfe_firmware_header *)fw->data;
@@ -637,17 +673,17 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
if (ret)
return ret;
- ret = __verify_fw_checksum(init, priv, header, init->chksum);
+ ret = __verify_fw_checksum(&init, priv, header, init.chksum);
if (ret)
return ret;
- __enable_dcpu(priv->regs);
+ __enable_dcpu(priv);
return 0;
}
static ssize_t generic_show(unsigned int command, u32 response[],
- struct private_data *priv, char *buf)
+ struct brcmstb_dpfe_priv *priv, char *buf)
{
int ret;
@@ -665,7 +701,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
unsigned int info;
ssize_t ret;
@@ -688,7 +724,7 @@ static ssize_t show_refresh(struct device *dev,
{
u32 response[MSG_FIELD_MAX];
void __iomem *info;
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
u8 refresh, sr_abort, ppre, thermal_offs, tuf;
u32 mr4;
ssize_t ret;
@@ -721,7 +757,7 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
void __iomem *info;
unsigned long val;
int ret;
@@ -747,7 +783,7 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
void __iomem *info;
ssize_t ret;
u32 mr5, mr6, mr7, mr8, err;
@@ -778,7 +814,7 @@ static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
ssize_t ret;
u32 mr4, mr5, mr6, mr7, mr8, err;
@@ -800,16 +836,15 @@ static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
- struct init_data init;
+ struct brcmstb_dpfe_priv *priv = platform_get_drvdata(pdev);
- return brcmstb_dpfe_download_firmware(pdev, &init);
+ return brcmstb_dpfe_download_firmware(priv);
}
static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct private_data *priv;
- struct init_data init;
+ struct brcmstb_dpfe_priv *priv;
struct resource *res;
int ret;
@@ -817,6 +852,8 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = dev;
+
mutex_init(&priv->lock);
platform_set_drvdata(pdev, priv);
@@ -851,9 +888,10 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = brcmstb_dpfe_download_firmware(pdev, &init);
+ ret = brcmstb_dpfe_download_firmware(priv);
if (ret) {
- dev_err(dev, "Couldn't download firmware -- %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't download firmware -- %d\n", ret);
return ret;
}
@@ -867,7 +905,7 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
static int brcmstb_dpfe_remove(struct platform_device *pdev)
{
- struct private_data *priv = dev_get_drvdata(&pdev->dev);
+ struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);
sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
@@ -876,10 +914,10 @@ static int brcmstb_dpfe_remove(struct platform_device *pdev)
static const struct of_device_id brcmstb_dpfe_of_match[] = {
/* Use legacy API v2 for a select number of chips */
- { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_v2 },
- { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_v2 },
- { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_v2 },
- { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_v2 },
+ { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
/* API v3 is the default going forward */
{ .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
{}
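The __send_command() change above computes the checksum at run time and writes it into the word right after the last argument, instead of reading it from a fixed MSG_CHKSUM slot. A standalone sketch of that placement, assuming the checksum is the plain sum of the preceding message words, which matches the visible use of get_msg_chksum():

	#include <stdio.h>

	#define MSG_ARG_COUNT	2	/* index of the argument-count word */

	static unsigned int chksum(const unsigned int msg[], unsigned int max)
	{
		unsigned int i, sum = 0;

		for (i = 0; i < max; i++)
			sum += msg[i];
		return sum;
	}

	int main(void)
	{
		/* header, command, argc = 1, arg0 */
		unsigned int msg[16] = { 1, 2, 1, 1 };
		/* chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1 */
		unsigned int idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;

		printf("checksum %u goes in word %u\n", chksum(msg, idx), idx);
		return 0;
	}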
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 402c6bc8e621..9d9127bf2a59 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1613,7 +1613,7 @@ static void emif_shutdown(struct platform_device *pdev)
static int get_emif_reg_values(struct emif_data *emif, u32 freq,
struct emif_regs *regs)
{
- u32 cs1_used, ip_rev, phy_type;
+ u32 ip_rev, phy_type;
u32 cl, type;
const struct lpddr2_timings *timings;
const struct lpddr2_min_tck *min_tck;
@@ -1621,7 +1621,6 @@ static int get_emif_reg_values(struct emif_data *emif, u32 freq,
const struct lpddr2_addressing *addressing;
struct emif_data *emif_for_calc;
struct device *dev;
- const struct emif_custom_configs *custom_configs;
dev = emif->dev;
/*
@@ -1639,12 +1638,10 @@ static int get_emif_reg_values(struct emif_data *emif, u32 freq,
device_info = emif_for_calc->plat_data->device_info;
type = device_info->type;
- cs1_used = device_info->cs1_used;
ip_rev = emif_for_calc->plat_data->ip_rev;
phy_type = emif_for_calc->plat_data->phy_type;
min_tck = emif_for_calc->plat_data->min_tck;
- custom_configs = emif_for_calc->plat_data->custom_configs;
set_ddr_clk_period(freq);
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
index 4a21b5044ff8..e59ccbd982d0 100644
--- a/drivers/memory/jedec_ddr.h
+++ b/drivers/memory/jedec_ddr.h
@@ -29,6 +29,7 @@
#define DDR_TYPE_LPDDR2_S4 3
#define DDR_TYPE_LPDDR2_S2 4
#define DDR_TYPE_LPDDR2_NVM 5
+#define DDR_TYPE_LPDDR3 6
/* DDR IO width */
#define DDR_IO_WIDTH_4 1
@@ -169,4 +170,64 @@ extern const struct lpddr2_timings
lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+/*
+ * Structure for LPDDR3 timings, based on LPDDR2 plus additional fields.
+ * All parameters are in picoseconds (ps) except max_freq and min_freq,
+ * which are in Hz.
+ */
+struct lpddr3_timings {
+ u32 max_freq;
+ u32 min_freq;
+ u32 tRFC;
+ u32 tRRD;
+ u32 tRPab;
+ u32 tRPpb;
+ u32 tRCD;
+ u32 tRC;
+ u32 tRAS;
+ u32 tWTR;
+ u32 tWR;
+ u32 tRTP;
+ u32 tW2W_C2C;
+ u32 tR2R_C2C;
+ u32 tWL;
+ u32 tDQSCK;
+ u32 tRL;
+ u32 tFAW;
+ u32 tXSR;
+ u32 tXP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tMRD;
+};
+
+/*
+ * Min value for some parameters in terms of number of tCK cycles (nCK).
+ * Set parameters that are not valid for a given memory type to zero.
+ */
+struct lpddr3_min_tck {
+ u32 tRFC;
+ u32 tRRD;
+ u32 tRPab;
+ u32 tRPpb;
+ u32 tRCD;
+ u32 tRC;
+ u32 tRAS;
+ u32 tWTR;
+ u32 tWR;
+ u32 tRTP;
+ u32 tW2W_C2C;
+ u32 tR2R_C2C;
+ u32 tWL;
+ u32 tDQSCK;
+ u32 tRL;
+ u32 tFAW;
+ u32 tXSR;
+ u32 tXP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tMRD;
+};
+
#endif /* __JEDEC_DDR_H */
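For illustration, one entry of the new lpddr3_timings structure as a consumer table might define it; every value below is a placeholder, not a real JEDEC number:

	static const struct lpddr3_timings lpddr3_timings_example = {
		.max_freq = 800000000,	/* Hz */
		.min_freq = 100000000,	/* Hz */
		.tRFC	  = 65000,	/* ps */
		.tRCD	  = 18000,	/* ps */
		.tRPab	  = 21000,	/* ps */
		/* remaining fields default to zero in this sketch */
	};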
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 439d7d886873..a113e811faab 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
static const struct dev_pm_ops smi_larb_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_larb_driver = {
@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
static const struct dev_pm_ops smi_common_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_common_driver = {
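Both hunks above add the same pattern: reuse the runtime-PM callbacks for system sleep by forcing a runtime suspend/resume in the late/early sleep phases. A hedged template of the pattern; example_runtime_suspend/resume are hypothetical driver callbacks:

	static const struct dev_pm_ops example_pm_ops = {
		SET_RUNTIME_PM_OPS(example_runtime_suspend,
				   example_runtime_resume, NULL)
		SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					     pm_runtime_force_resume)
	};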
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 46539b27a3fb..71f26eac7350 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -3,6 +3,7 @@
* OpenFirmware helpers for memory drivers
*
* Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
*/
#include <linux/device.h>
@@ -149,3 +150,151 @@ default_timings:
return lpddr2_jedec_timings;
}
EXPORT_SYMBOL(of_get_ddr_timings);
+
+/**
+ * of_lpddr3_get_min_tck() - extract min timing values for lpddr3
+ * @np: pointer to ddr device tree node
+ * @dev: device requesting the min timing values
+ *
+ * Populates the lpddr3_min_tck structure by extracting data
+ * from the device tree node. Returns a pointer to the populated
+ * structure. If any error occurs while populating, returns NULL.
+ */
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+ struct device *dev)
+{
+ int ret = 0;
+ struct lpddr3_min_tck *min;
+
+ min = devm_kzalloc(dev, sizeof(*min), GFP_KERNEL);
+ if (!min)
+ goto default_min_tck;
+
+ ret |= of_property_read_u32(np, "tRFC-min-tck", &min->tRFC);
+ ret |= of_property_read_u32(np, "tRRD-min-tck", &min->tRRD);
+ ret |= of_property_read_u32(np, "tRPab-min-tck", &min->tRPab);
+ ret |= of_property_read_u32(np, "tRPpb-min-tck", &min->tRPpb);
+ ret |= of_property_read_u32(np, "tRCD-min-tck", &min->tRCD);
+ ret |= of_property_read_u32(np, "tRC-min-tck", &min->tRC);
+ ret |= of_property_read_u32(np, "tRAS-min-tck", &min->tRAS);
+ ret |= of_property_read_u32(np, "tWTR-min-tck", &min->tWTR);
+ ret |= of_property_read_u32(np, "tWR-min-tck", &min->tWR);
+ ret |= of_property_read_u32(np, "tRTP-min-tck", &min->tRTP);
+ ret |= of_property_read_u32(np, "tW2W-C2C-min-tck", &min->tW2W_C2C);
+ ret |= of_property_read_u32(np, "tR2R-C2C-min-tck", &min->tR2R_C2C);
+ ret |= of_property_read_u32(np, "tWL-min-tck", &min->tWL);
+ ret |= of_property_read_u32(np, "tDQSCK-min-tck", &min->tDQSCK);
+ ret |= of_property_read_u32(np, "tRL-min-tck", &min->tRL);
+ ret |= of_property_read_u32(np, "tFAW-min-tck", &min->tFAW);
+ ret |= of_property_read_u32(np, "tXSR-min-tck", &min->tXSR);
+ ret |= of_property_read_u32(np, "tXP-min-tck", &min->tXP);
+ ret |= of_property_read_u32(np, "tCKE-min-tck", &min->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR-min-tck", &min->tCKESR);
+ ret |= of_property_read_u32(np, "tMRD-min-tck", &min->tMRD);
+
+ if (ret) {
+ dev_warn(dev, "%s: errors while parsing min-tck values\n",
+ __func__);
+ devm_kfree(dev, min);
+ goto default_min_tck;
+ }
+
+ return min;
+
+default_min_tck:
+ dev_warn(dev, "%s: using default min-tck values\n", __func__);
+ return NULL;
+}
+EXPORT_SYMBOL(of_lpddr3_get_min_tck);
+
+static int of_lpddr3_do_get_timings(struct device_node *np,
+ struct lpddr3_timings *tim)
+{
+ int ret;
+
+	/* The 'reg' param is required since DT has changed; it is used as 'max-freq' */
+ ret = of_property_read_u32(np, "reg", &tim->max_freq);
+ ret |= of_property_read_u32(np, "min-freq", &tim->min_freq);
+ ret |= of_property_read_u32(np, "tRFC", &tim->tRFC);
+ ret |= of_property_read_u32(np, "tRRD", &tim->tRRD);
+ ret |= of_property_read_u32(np, "tRPab", &tim->tRPab);
+ ret |= of_property_read_u32(np, "tRPpb", &tim->tRPpb);
+ ret |= of_property_read_u32(np, "tRCD", &tim->tRCD);
+ ret |= of_property_read_u32(np, "tRC", &tim->tRC);
+ ret |= of_property_read_u32(np, "tRAS", &tim->tRAS);
+ ret |= of_property_read_u32(np, "tWTR", &tim->tWTR);
+ ret |= of_property_read_u32(np, "tWR", &tim->tWR);
+ ret |= of_property_read_u32(np, "tRTP", &tim->tRTP);
+ ret |= of_property_read_u32(np, "tW2W-C2C", &tim->tW2W_C2C);
+ ret |= of_property_read_u32(np, "tR2R-C2C", &tim->tR2R_C2C);
+ ret |= of_property_read_u32(np, "tFAW", &tim->tFAW);
+ ret |= of_property_read_u32(np, "tXSR", &tim->tXSR);
+ ret |= of_property_read_u32(np, "tXP", &tim->tXP);
+ ret |= of_property_read_u32(np, "tCKE", &tim->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR", &tim->tCKESR);
+ ret |= of_property_read_u32(np, "tMRD", &tim->tMRD);
+
+ return ret;
+}
+
+/**
+ * of_lpddr3_get_ddr_timings() - extracts the lpddr3 timings and updates the
+ * number of available frequencies.
+ * @np_ddr: Pointer to ddr device tree node
+ * @dev: Device requesting the ddr timings
+ * @device_type: Type of ddr
+ * @nr_frequencies: Number of frequencies available for ddr
+ * (updated by this function)
+ *
+ * Populates the lpddr3_timings structure by extracting data from the device
+ * tree node. Returns a pointer to the populated structure. If any error
+ * occurs while populating, returns NULL.
+ */
+const struct lpddr3_timings
+*of_lpddr3_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
+ u32 device_type, u32 *nr_frequencies)
+{
+ struct lpddr3_timings *timings = NULL;
+ u32 arr_sz = 0, i = 0;
+ struct device_node *np_tim;
+ char *tim_compat = NULL;
+
+ switch (device_type) {
+ case DDR_TYPE_LPDDR3:
+ tim_compat = "jedec,lpddr3-timings";
+ break;
+ default:
+		dev_warn(dev, "%s: unsupported memory type\n", __func__);
+ }
+
+ for_each_child_of_node(np_ddr, np_tim)
+ if (of_device_is_compatible(np_tim, tim_compat))
+ arr_sz++;
+
+ if (arr_sz)
+ timings = devm_kcalloc(dev, arr_sz, sizeof(*timings),
+ GFP_KERNEL);
+
+ if (!timings)
+ goto default_timings;
+
+ for_each_child_of_node(np_ddr, np_tim) {
+ if (of_device_is_compatible(np_tim, tim_compat)) {
+ if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
+ devm_kfree(dev, timings);
+ goto default_timings;
+ }
+ i++;
+ }
+ }
+
+ *nr_frequencies = arr_sz;
+
+ return timings;
+
+default_timings:
+ dev_warn(dev, "%s: failed to get timings\n", __func__);
+ *nr_frequencies = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(of_lpddr3_get_ddr_timings);
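A hedged sketch of how a memory-controller probe might consume the two helpers exported above; 'np' and 'dev' would come from the probe, and error handling is trimmed:

	const struct lpddr3_min_tck *min_tck;
	const struct lpddr3_timings *timings;
	u32 nr_freq;

	min_tck = of_lpddr3_get_min_tck(np, dev);
	timings = of_lpddr3_get_ddr_timings(np, dev, DDR_TYPE_LPDDR3,
					    &nr_freq);
	if (!timings || !min_tck)
		return -EINVAL;	/* both return NULL on any parse error */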
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index b077cc836b0b..e39ecc4c733d 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -14,6 +14,11 @@ extern const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
extern const struct lpddr2_timings
*of_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
u32 device_type, u32 *nr_frequencies);
+extern const struct lpddr3_min_tck
+ *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev);
+extern const struct lpddr3_timings
+ *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
@@ -27,6 +32,19 @@ static inline const struct lpddr2_timings
{
return NULL;
}
+
+static inline const struct lpddr3_min_tck
+ *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
+
+static inline const struct lpddr3_timings
+ *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies)
+{
+ return NULL;
+}
#endif /* CONFIG_OF && CONFIG_DDR */
#endif /* __LINUX_MEMORY_OF_REG_ */
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 79ce7ea58903..e9c3ce92350c 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -7,6 +7,19 @@ config SAMSUNG_MC
if SAMSUNG_MC
+config EXYNOS5422_DMC
+ tristate "EXYNOS5422 Dynamic Memory Controller driver"
+ depends on ARCH_EXYNOS || (COMPILE_TEST && HAS_IOMEM)
+ select DDR
+ depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
+ depends on (PM_DEVFREQ && PM_DEVFREQ_EVENT)
+ help
+	  This adds the driver for the Exynos5422 DMC (Dynamic Memory Controller).
+	  The driver provides support for Dynamic Voltage and Frequency Scaling in
+	  the DMC and DRAM. It also supports changing the DRAM timings when
+	  running at a different frequency. The timings are calculated based on
+	  DT memory information.
+
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
diff --git a/drivers/memory/samsung/Makefile b/drivers/memory/samsung/Makefile
index 00587be66211..ea071be21c44 100644
--- a/drivers/memory/samsung/Makefile
+++ b/drivers/memory/samsung/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_EXYNOS5422_DMC) += exynos5422-dmc.o
obj-$(CONFIG_EXYNOS_SROM) += exynos-srom.o
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
new file mode 100644
index 000000000000..47dbf6d1789f
--- /dev/null
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -0,0 +1,1550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+#define EXYNOS5_DREXI_TIMINGAREF (0x0030)
+#define EXYNOS5_DREXI_TIMINGROW0 (0x0034)
+#define EXYNOS5_DREXI_TIMINGDATA0 (0x0038)
+#define EXYNOS5_DREXI_TIMINGPOWER0 (0x003C)
+#define EXYNOS5_DREXI_TIMINGROW1 (0x00E4)
+#define EXYNOS5_DREXI_TIMINGDATA1 (0x00E8)
+#define EXYNOS5_DREXI_TIMINGPOWER1 (0x00EC)
+#define CDREX_PAUSE (0x2091c)
+#define CDREX_LPDDR3PHY_CON3 (0x20a20)
+#define CDREX_LPDDR3PHY_CLKM_SRC (0x20700)
+#define EXYNOS5_TIMING_SET_SWI BIT(28)
+#define USE_MX_MSPLL_TIMINGS (1)
+#define USE_BPLL_TIMINGS (0)
+#define EXYNOS5_AREF_NORMAL (0x2e)
+
+#define DREX_PPCCLKCON (0x0130)
+#define DREX_PEREV2CONFIG (0x013c)
+#define DREX_PMNC_PPC (0xE000)
+#define DREX_CNTENS_PPC (0xE010)
+#define DREX_CNTENC_PPC (0xE020)
+#define DREX_INTENS_PPC (0xE030)
+#define DREX_INTENC_PPC (0xE040)
+#define DREX_FLAG_PPC (0xE050)
+#define DREX_PMCNT2_PPC (0xE130)
+
+/*
+ * A value for register DREX_PMNC_PPC which should be written to reset
+ * the cycle counter CCNT (a reference wall clock). It sets the CCNT
+ * counter to zero.
+ */
+#define CC_RESET BIT(2)
+
+/*
+ * A value for register DREX_PMNC_PPC which does the reset of all performance
+ * counters to zero.
+ */
+#define PPC_COUNTER_RESET BIT(1)
+
+/*
+ * Enables all configured counters (including cycle counter). The value should
+ * be written to the register DREX_PMNC_PPC.
+ */
+#define PPC_ENABLE BIT(0)
+
+/*
+ * A value for register DREX_PPCCLKCON which enables the performance events
+ * clock. Must be written before the first access to the performance counters
+ * register set, otherwise it could crash.
+ */
+#define PEREV_CLK_EN BIT(0)
+
+/*
+ * Values which are used to enable counters, interrupts or configure flags of
+ * the performance counters. They configure counter 2 and cycle counter.
+ */
+#define PERF_CNT2 BIT(2)
+#define PERF_CCNT BIT(31)
+
+/*
+ * Performance event types which are used for setting the preferred event
+ * to track in the counters.
+ * There is a set of different types, the values are from range 0 to 0x6f.
+ * These settings should be written to the configuration register which manages
+ * the type of the event (register DREX_PEREV2CONFIG).
+ */
+#define READ_TRANSFER_CH0 (0x6d)
+#define READ_TRANSFER_CH1 (0x6f)
+
+#define PERF_COUNTER_START_VALUE 0xff000000
+#define PERF_EVENT_UP_DOWN_THRESHOLD 900000000ULL
+
+/**
+ * struct dmc_opp_table - Operating level description
+ *
+ * Covers frequency and voltage settings of the DMC operating mode.
+ */
+struct dmc_opp_table {
+ u32 freq_hz;
+ u32 volt_uv;
+};
+
+/**
+ * struct exynos5_dmc - main structure describing DMC device
+ *
+ * The main structure for the Dynamic Memory Controller which covers clocks,
+ * memory regions, HW information, parameters and current operating mode.
+ */
+struct exynos5_dmc {
+ struct device *dev;
+ struct devfreq *df;
+ struct devfreq_simple_ondemand_data gov_data;
+ void __iomem *base_drexi0;
+ void __iomem *base_drexi1;
+ struct regmap *clk_regmap;
+ struct mutex lock;
+ unsigned long curr_rate;
+ unsigned long curr_volt;
+ unsigned long bypass_rate;
+ struct dmc_opp_table *opp;
+ struct dmc_opp_table opp_bypass;
+ int opp_count;
+ u32 timings_arr_size;
+ u32 *timing_row;
+ u32 *timing_data;
+ u32 *timing_power;
+ const struct lpddr3_timings *timings;
+ const struct lpddr3_min_tck *min_tck;
+ u32 bypass_timing_row;
+ u32 bypass_timing_data;
+ u32 bypass_timing_power;
+ struct regulator *vdd_mif;
+ struct clk *fout_spll;
+ struct clk *fout_bpll;
+ struct clk *mout_spll;
+ struct clk *mout_bpll;
+ struct clk *mout_mclk_cdrex;
+ struct clk *mout_mx_mspll_ccore;
+ struct clk *mx_mspll_ccore_phy;
+ struct clk *mout_mx_mspll_ccore_phy;
+ struct devfreq_event_dev **counter;
+ int num_counters;
+ u64 last_overflow_ts[2];
+ unsigned long load;
+ unsigned long total;
+ bool in_irq_mode;
+};
+
+#define TIMING_FIELD(t_name, t_bit_beg, t_bit_end) \
+ { .name = t_name, .bit_beg = t_bit_beg, .bit_end = t_bit_end }
+
+#define TIMING_VAL2REG(timing, t_val) \
+({ \
+ u32 __val; \
+ __val = (t_val) << (timing)->bit_beg; \
+ __val; \
+})
+
+struct timing_reg {
+ char *name;
+ int bit_beg;
+ int bit_end;
+ unsigned int val;
+};
+
+static const struct timing_reg timing_row[] = {
+ TIMING_FIELD("tRFC", 24, 31),
+ TIMING_FIELD("tRRD", 20, 23),
+ TIMING_FIELD("tRP", 16, 19),
+ TIMING_FIELD("tRCD", 12, 15),
+ TIMING_FIELD("tRC", 6, 11),
+ TIMING_FIELD("tRAS", 0, 5),
+};
+
+static const struct timing_reg timing_data[] = {
+ TIMING_FIELD("tWTR", 28, 31),
+ TIMING_FIELD("tWR", 24, 27),
+ TIMING_FIELD("tRTP", 20, 23),
+ TIMING_FIELD("tW2W-C2C", 14, 14),
+ TIMING_FIELD("tR2R-C2C", 12, 12),
+ TIMING_FIELD("WL", 8, 11),
+ TIMING_FIELD("tDQSCK", 4, 7),
+ TIMING_FIELD("RL", 0, 3),
+};
+
+static const struct timing_reg timing_power[] = {
+ TIMING_FIELD("tFAW", 26, 31),
+ TIMING_FIELD("tXSR", 16, 25),
+ TIMING_FIELD("tXP", 8, 15),
+ TIMING_FIELD("tCKE", 4, 7),
+ TIMING_FIELD("tMRD", 0, 3),
+};
+
+#define TIMING_COUNT (ARRAY_SIZE(timing_row) + ARRAY_SIZE(timing_data) + \
+ ARRAY_SIZE(timing_power))
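+
+/*
+ * Editorial sketch, not part of the driver: with the tables above,
+ * packing tRFC = 0x18 through the first timing_row field,
+ *
+ *   const struct timing_reg *t = &timing_row[0];  // "tRFC", bits 24..31
+ *   u32 reg = TIMING_VAL2REG(t, 0x18);            // reg == 0x18000000
+ *
+ * puts the value in bits [31:24] of TIMINGROW0.
+ */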
+
+static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_set_event(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos5_counters_enable_edev(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_enable_edev(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_disable_edev(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * find_target_freq_idx() - Finds requested frequency in local DMC configuration
+ * @dmc: device for which the information is checked
+ * @target_rate: requested frequency in KHz
+ *
+ * Searches the local DMC driver structure for the requested frequency value
+ * and returns its index or an error value.
+ */
+static int find_target_freq_idx(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int i;
+
+ for (i = dmc->opp_count - 1; i >= 0; i--)
+ if (dmc->opp[i].freq_hz <= target_rate)
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * exynos5_switch_timing_regs() - Changes bank register set for DRAM timings
+ * @dmc: device for which the new settings are going to be applied
+ * @set: boolean variable passing set value
+ *
+ * Changes the register set, which holds timing parameters.
+ * There are two register sets: 0 and 1. Register set 0
+ * is used in normal operation when the clock is provided from the main PLL.
+ * The bank register set 1 is used when the main PLL frequency is going to be
+ * changed and the clock is taken from an alternative, stable source.
+ * This function switches between these banks according to the
+ * currently used clock source.
+ */
+static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
+
+ if (set)
+ reg |= EXYNOS5_TIMING_SET_SWI;
+ else
+ reg &= ~EXYNOS5_TIMING_SET_SWI;
+
+ regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
+}
+
+/**
+ * exynos5_init_freq_table() - Initializes the PM OPP framework
+ * @dmc: DMC device for which the frequencies are used for OPP init
+ * @profile: devfreq device's profile
+ *
+ * Populates the devfreq device's OPP table based on the current frequency
+ * and voltage.
+ */
+static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
+ struct devfreq_dev_profile *profile)
+{
+ int i, ret;
+ int idx;
+ unsigned long freq;
+
+ ret = dev_pm_opp_of_add_table(dmc->dev);
+ if (ret < 0) {
+ dev_err(dmc->dev, "Failed to get OPP table\n");
+ return ret;
+ }
+
+ dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev);
+
+ dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count,
+ sizeof(struct dmc_opp_table), GFP_KERNEL);
+ if (!dmc->opp)
+ goto err_opp;
+
+ idx = dmc->opp_count - 1;
+ for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) {
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq);
+ if (IS_ERR(opp))
+ goto err_opp;
+
+ dmc->opp[idx - i].freq_hz = freq;
+ dmc->opp[idx - i].volt_uv = dev_pm_opp_get_voltage(opp);
+
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+
+err_opp:
+ dev_pm_opp_of_remove_table(dmc->dev);
+
+ return -EINVAL;
+}
+
+/**
+ * exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings
+ * @dmc: device for which the new settings are going to be applied
+ *
+ * Low-level function for changing timings for DRAM memory clocked from the
+ * 'bypass' clock source (fixed frequency @400MHz).
+ * It uses timing bank registers set 1.
+ */
+static void exynos5_set_bypass_dram_timings(struct exynos5_dmc *dmc)
+{
+ writel(EXYNOS5_AREF_NORMAL,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
+
+ writel(dmc->bypass_timing_row,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW1);
+ writel(dmc->bypass_timing_row,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW1);
+ writel(dmc->bypass_timing_data,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA1);
+ writel(dmc->bypass_timing_data,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA1);
+ writel(dmc->bypass_timing_power,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER1);
+ writel(dmc->bypass_timing_power,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER1);
+}
+
+/**
+ * exynos5_dram_change_timings() - Low-level changes of the DRAM final timings
+ * @dmc: device for which the new settings are going to be applied
+ * @target_rate: target frequency of the DMC
+ *
+ * Low-level function for changing timings for DRAM memory operating from main
+ * clock source (BPLL), which can have different frequencies. Thus, each
+ * frequency must have corresponding timings register values in order to keep
+ * the needed delays.
+ * It uses timing bank registers set 0.
+ */
+static int exynos5_dram_change_timings(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int idx;
+
+ for (idx = dmc->opp_count - 1; idx >= 0; idx--)
+ if (dmc->opp[idx].freq_hz <= target_rate)
+ break;
+
+ if (idx < 0)
+ return -EINVAL;
+
+ writel(EXYNOS5_AREF_NORMAL,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
+
+ writel(dmc->timing_row[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW0);
+ writel(dmc->timing_row[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW0);
+ writel(dmc->timing_data[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA0);
+ writel(dmc->timing_data[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA0);
+ writel(dmc->timing_power[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER0);
+ writel(dmc->timing_power[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER0);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_align_target_voltage() - Sets the final voltage for the DMC
+ * @dmc: device for which it is going to be set
+ * @target_volt: new voltage which is chosen to be final
+ *
+ * Function tries to align the voltage to the safe level for 'normal' mode.
+ * It checks the need for a higher voltage and changes the value. The target
+ * voltage might be lower than the currently set one and the system will
+ * still be stable.
+ */
+static int exynos5_dmc_align_target_voltage(struct exynos5_dmc *dmc,
+ unsigned long target_volt)
+{
+ int ret = 0;
+
+ if (dmc->curr_volt <= target_volt)
+ return 0;
+
+ ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
+ target_volt);
+ if (!ret)
+ dmc->curr_volt = target_volt;
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_align_bypass_voltage() - Sets the voltage for the DMC
+ * @dmc: device for which it is going to be set
+ * @target_volt: new voltage which is chosen to be final
+ *
+ * Function tries to align the voltage to the safe level for the 'bypass' mode.
+ * It checks the need for a higher voltage and changes the value.
+ * The target voltage must not be less than the currently needed one, because
+ * the device might become unstable at the current frequency.
+ */
+static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc,
+ unsigned long target_volt)
+{
+ int ret = 0;
+ unsigned long bypass_volt = dmc->opp_bypass.volt_uv;
+
+ target_volt = max(bypass_volt, target_volt);
+
+ if (dmc->curr_volt >= target_volt)
+ return 0;
+
+ ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
+ target_volt);
+ if (!ret)
+ dmc->curr_volt = target_volt;
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_align_bypass_dram_timings() - Chooses and sets DRAM timings
+ * @dmc: device for which it is going to be set
+ * @target_rate: new frequency which is chosen to be final
+ *
+ * Function changes the DRAM timings for the temporary 'bypass' mode.
+ */
+static int exynos5_dmc_align_bypass_dram_timings(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int idx = find_target_freq_idx(dmc, target_rate);
+
+ if (idx < 0)
+ return -EINVAL;
+
+ exynos5_set_bypass_dram_timings(dmc);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_switch_to_bypass_configuration() - Switching to temporary clock
+ * @dmc: DMC device for which the switching is going to happen
+ * @target_rate: new frequency which is going to be set as a final
+ * @target_volt: new voltage which is going to be set as a final
+ *
+ * Function configures DMC and clocks for operating in temporary 'bypass' mode.
+ * This mode is used only temporarily but, if required, changes the voltage
+ * and timings for the DRAM chips. It switches the main clock to a stable
+ * clock source for the period of the main PLL reconfiguration.
+ */
+static int
+exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
+ unsigned long target_rate,
+ unsigned long target_volt)
+{
+ int ret;
+
+ /*
+	 * Having a higher voltage for a particular frequency does not harm
+	 * the chip. Use it for the temporary frequency change so that one
+	 * voltage manipulation might be avoided.
+ */
+ ret = exynos5_dmc_align_bypass_voltage(dmc, target_volt);
+ if (ret)
+ return ret;
+
+ /*
+	 * Longer delays for DRAM do not cause a crash; the opposite does.
+ */
+ ret = exynos5_dmc_align_bypass_dram_timings(dmc, target_rate);
+ if (ret)
+ return ret;
+
+ /*
+	 * Delays are long enough, so use them for the incoming clock.
+ */
+ exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_change_freq_and_volt() - Changes voltage and frequency of the DMC
+ * using safe procedure
+ * @dmc: device for which the frequency is going to be changed
+ * @target_rate: requested new frequency
+ * @target_volt: requested voltage which corresponds to the new frequency
+ *
+ * The DMC frequency change procedure requires a few steps.
+ * The main requirement is to change the clock source in the clk mux
+ * for the time of main clock PLL locking. The assumption is that the
+ * alternative clock source set as parent is stable.
+ * The second parent's clock frequency is fixed at 400MHz; it is named the
+ * 'bypass' clock. This requires aligning the DRAM timing parameters for the
+ * new T-period. There are two bank sets for keeping DRAM
+ * timings: set 0 and set 1. Set 0 is used when the main clock source is
+ * chosen. The 2nd set of regs is used for the 'bypass' clock. Switching
+ * between the two bank sets is part of the process.
+ * The voltage must also be aligned to the minimum required level. There is
+ * an intermediate step of switching to the 'bypass' parent clock source;
+ * if the old voltage is lower, it requires an increase of the voltage level.
+ * The complexity of the voltage manipulation is hidden in low-level functions.
+ * In this function, the voltage level is aligned one last time at the end.
+ */
+static int
+exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
+ unsigned long target_rate,
+ unsigned long target_volt)
+{
+ int ret;
+
+ ret = exynos5_dmc_switch_to_bypass_configuration(dmc, target_rate,
+ target_volt);
+ if (ret)
+ return ret;
+
+ /*
+ * Voltage is set at least to a level needed for this frequency,
+ * so switching clock source is safe now.
+ */
+ clk_prepare_enable(dmc->fout_spll);
+ clk_prepare_enable(dmc->mout_spll);
+ clk_prepare_enable(dmc->mout_mx_mspll_ccore);
+
+ ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_mx_mspll_ccore);
+ if (ret)
+ goto disable_clocks;
+
+ /*
+ * It is safe to increase the timings at the current bypass frequency.
+ * Thanks to this, the settings will be ready for the upcoming clock
+ * source change.
+ */
+ exynos5_dram_change_timings(dmc, target_rate);
+
+ clk_set_rate(dmc->fout_bpll, target_rate);
+
+ exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+
+ ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
+ if (ret)
+ goto disable_clocks;
+
+ /*
+ * Make sure the voltage is not left at the 'bypass' level and align it
+ * to the right level for power efficiency.
+ */
+ ret = exynos5_dmc_align_target_voltage(dmc, target_volt);
+
+disable_clocks:
+ clk_disable_unprepare(dmc->mout_mx_mspll_ccore);
+ clk_disable_unprepare(dmc->mout_spll);
+ clk_disable_unprepare(dmc->fout_spll);
+
+ return ret;
+}
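+
+/*
+ * Illustrative sketch only (not part of the driver): the generic clk API
+ * pattern used above, reduced to its core. 'mux', 'stable' and 'pll' are
+ * hypothetical clocks.
+ */
+#if 0
+static int reparent_while_pll_relocks(struct clk *mux, struct clk *stable,
+ struct clk *pll, unsigned long rate)
+{
+ int ret;
+
+ /* Run from the stable source while the PLL is reprogrammed. */
+ ret = clk_set_parent(mux, stable);
+ if (ret)
+ return ret;
+
+ /* Reprogram the PLL; clk_set_rate() typically returns once it locked. */
+ ret = clk_set_rate(pll, rate);
+ if (ret)
+ return ret;
+
+ /* Switch back to the PLL as the final clock source. */
+ return clk_set_parent(mux, pll);
+}
+#endif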
+
+/**
+ * exynos5_dmc_get_volt_freq() - Gets the frequency and voltage from the OPP
+ * table.
+ * @dmc: device for which the frequency is going to be changed
+ * @freq: requested frequency in kHz
+ * @target_rate: returned frequency which is the same or lower than
+ * requested
+ * @target_volt: returned voltage which corresponds to the returned
+ * frequency
+ * @flags: devfreq flags provided for the frequency change request
+ *
+ * The function takes the requested frequency and asks the OPP framework for
+ * the matching frequency and voltage. It populates 'target_rate' and
+ * 'target_volt', or returns an error value when the OPP framework fails.
+ */
+static int exynos5_dmc_get_volt_freq(struct exynos5_dmc *dmc,
+ unsigned long *freq,
+ unsigned long *target_rate,
+ unsigned long *target_volt, u32 flags)
+{
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dmc->dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ *target_rate = dev_pm_opp_get_freq(opp);
+ *target_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ return 0;
+}
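+
+/*
+ * Usage sketch (illustrative only, with assumed numbers): 'flags' is
+ * forwarded to devfreq_recommended_opp(). With DEVFREQ_FLAG_LEAST_UPPER_BOUND
+ * set, the OPP core picks the highest OPP at or below *freq; with flags == 0
+ * it picks the lowest OPP at or above it.
+ */
+#if 0
+ unsigned long freq = 413000000; /* hypothetical request */
+ unsigned long rate, volt;
+ int ret;
+
+ /* with flags == 0 this would round up to the next OPP, e.g. 600 MHz */
+ ret = exynos5_dmc_get_volt_freq(dmc, &freq, &rate, &volt, 0);
+#endif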
+
+/**
+ * exynos5_dmc_target() - Function responsible for changing frequency of DMC
+ * @dev: device for which the frequency is going to be changed
+ * @freq: requested frequency in kHz
+ * @flags: flags provided for this frequency change request
+ *
+ * An entry point provided to the devfreq framework which performs the
+ * frequency change of the DMC. The function gets the possible rate from the
+ * OPP table based on the requested frequency, then calls the function
+ * responsible for the frequency and voltage change. In case of failure, it
+ * does not set 'curr_rate' and returns an error value to the framework.
+ */
+static int exynos5_dmc_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+ unsigned long target_rate = 0;
+ unsigned long target_volt = 0;
+ int ret;
+
+ ret = exynos5_dmc_get_volt_freq(dmc, freq, &target_rate, &target_volt,
+ flags);
+ if (ret)
+ return ret;
+
+ if (target_rate == dmc->curr_rate)
+ return 0;
+
+ mutex_lock(&dmc->lock);
+
+ ret = exynos5_dmc_change_freq_and_volt(dmc, target_rate, target_volt);
+ if (ret) {
+ mutex_unlock(&dmc->lock);
+ return ret;
+ }
+
+ dmc->curr_rate = target_rate;
+
+ mutex_unlock(&dmc->lock);
+ return 0;
+}
+
+/**
+ * exynos5_counters_get() - Gets the performance counter values.
+ * @dmc: device for which the counters are going to be checked
+ * @load_count: variable which is populated with counter value
+ * @total_count: variable which is used as 'wall clock' reference
+ *
+ * Function which provides the performance counter values. It sums up the
+ * counters of the two DMC channels. The 'total_count' is used as a reference
+ * and maximum value. The ratio 'load_count/total_count' shows the busy
+ * percentage [0%, 100%].
+ */
+static int exynos5_counters_get(struct exynos5_dmc *dmc,
+ unsigned long *load_count,
+ unsigned long *total_count)
+{
+ unsigned long total = 0;
+ struct devfreq_event_data event;
+ int ret, i;
+
+ *load_count = 0;
+
+ /* Take into account only read+write counters, but stop all */
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+
+ ret = devfreq_event_get_event(dmc->counter[i], &event);
+ if (ret < 0)
+ return ret;
+
+ *load_count += event.load_count;
+
+ if (total < event.total_count)
+ total = event.total_count;
+ }
+
+ *total_count = total;
+
+ return 0;
+}
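+
+/*
+ * Illustrative only: turning the two values into a busy percentage, as the
+ * kerneldoc above describes. The guard against total == 0 is an assumption.
+ */
+#if 0
+ unsigned long load, total, busy_pct;
+
+ if (!exynos5_counters_get(dmc, &load, &total) && total)
+ busy_pct = load * 100 / total; /* within [0%, 100%] */
+#endif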
+
+/**
+ * exynos5_dmc_start_perf_events() - Setup and start performance event counters
+ * @dmc: device for which the counters are going to be checked
+ * @beg_value: initial value for the counter
+ *
+ * Function which enables the needed counters and interrupts, sets the
+ * initial values and then starts the counters.
+ */
+static void exynos5_dmc_start_perf_events(struct exynos5_dmc *dmc,
+ u32 beg_value)
+{
+ /* Enable interrupts for counter 2 */
+ writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENS_PPC);
+ writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENS_PPC);
+
+ /* Enable counter 2 and CCNT */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENS_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENS_PPC);
+
+ /* Clear overflow flag for all counters */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
+
+ /* Reset all counters */
+ writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /*
+ * Set start value for the counters, the number of samples that
+ * will be gathered is calculated as: 0xffffffff - beg_value
+ */
+ writel(beg_value, dmc->base_drexi0 + DREX_PMCNT2_PPC);
+ writel(beg_value, dmc->base_drexi1 + DREX_PMCNT2_PPC);
+
+ /* Start all counters */
+ writel(PPC_ENABLE, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(PPC_ENABLE, dmc->base_drexi1 + DREX_PMNC_PPC);
+}
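+
+/*
+ * Worked example of the start-value arithmetic above (value assumed): with
+ * beg_value = 0xfff00000 the counter overflows, and the IRQ fires, after
+ * 0xffffffff - 0xfff00000 = 0x000fffff events.
+ */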
+
+/**
+ * exynos5_dmc_perf_events_calc() - Calculate utilization
+ * @dmc: device for which the counters are going to be checked
+ * @diff_ts: time between last interrupt and current one
+ *
+ * Function which calculates the utilization needed by the devfreq governor.
+ * It prepares the 'busy_time' and 'total_time' values based on the time
+ * elapsed between interrupts, which approximates the utilization.
+ */
+static void exynos5_dmc_perf_events_calc(struct exynos5_dmc *dmc, u64 diff_ts)
+{
+ /*
+ * This is a simple algorithm for managing traffic on the DMC.
+ * When there is almost no load, the counters overflow every 4 s,
+ * no matter the DMC frequency.
+ * A high load might be approximated using a linear function.
+ * Knowing that, a simple calculation can provide 'busy_time' and
+ * 'total_time' to the devfreq governor, which picks the target
+ * frequency.
+ * We want a fast ramp-up and a slow decay in the frequency change function.
+ if (diff_ts < PERF_EVENT_UP_DOWN_THRESHOLD) {
+ /*
+ * Set higher utilization for the simple_ondemand governor.
+ * The governor should increase the frequency of the DMC.
+ */
+ dmc->load = 70;
+ dmc->total = 100;
+ } else {
+ /*
+ * Set low utilization for the simple_ondemand governor.
+ * The governor should decrease the frequency of the DMC.
+ */
+ dmc->load = 35;
+ dmc->total = 100;
+ }
+
+ dev_dbg(dmc->dev, "diff_ts=%llu\n", diff_ts);
+}
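+
+/*
+ * How these numbers interact with the governor (using the thresholds set in
+ * probe for IRQ mode: upthreshold = 55, downdifferential = 5): a load of
+ * 70/100 exceeds 55, so simple_ondemand jumps to the maximum frequency,
+ * while 35/100 is below 55 - 5 = 50, so the frequency is scaled down
+ * proportionally.
+ */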
+
+/**
+ * exynos5_dmc_perf_events_check() - Checks the status of the counters
+ * @dmc: device for which the counters are going to be checked
+ *
+ * Function called from the threaded IRQ handler to check the counters'
+ * state and to invoke the utilization approximation.
+ */
+static void exynos5_dmc_perf_events_check(struct exynos5_dmc *dmc)
+{
+ u32 val;
+ u64 diff_ts, ts;
+
+ ts = ktime_get_ns();
+
+ /* Stop all counters */
+ writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /* Check the source in interrupt flag registers (which channel) */
+ val = readl(dmc->base_drexi0 + DREX_FLAG_PPC);
+ if (val) {
+ diff_ts = ts - dmc->last_overflow_ts[0];
+ dmc->last_overflow_ts[0] = ts;
+ dev_dbg(dmc->dev, "drex0 0xE050 val= 0x%08x\n", val);
+ } else {
+ val = readl(dmc->base_drexi1 + DREX_FLAG_PPC);
+ diff_ts = ts - dmc->last_overflow_ts[1];
+ dmc->last_overflow_ts[1] = ts;
+ dev_dbg(dmc->dev, "drex1 0xE050 val= 0x%08x\n", val);
+ }
+
+ exynos5_dmc_perf_events_calc(dmc, diff_ts);
+
+ exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
+}
+
+/**
+ * exynos5_dmc_enable_perf_events() - Enable performance events
+ * @dmc: device for which the counters are going to be checked
+ *
+ * Function which sets up the needed environment and enables the counters.
+ */
+static void exynos5_dmc_enable_perf_events(struct exynos5_dmc *dmc)
+{
+ u64 ts;
+
+ /* Enable Performance Event Clock */
+ writel(PEREV_CLK_EN, dmc->base_drexi0 + DREX_PPCCLKCON);
+ writel(PEREV_CLK_EN, dmc->base_drexi1 + DREX_PPCCLKCON);
+
+ /* Select read transfers as performance event2 */
+ writel(READ_TRANSFER_CH0, dmc->base_drexi0 + DREX_PEREV2CONFIG);
+ writel(READ_TRANSFER_CH1, dmc->base_drexi1 + DREX_PEREV2CONFIG);
+
+ ts = ktime_get_ns();
+ dmc->last_overflow_ts[0] = ts;
+ dmc->last_overflow_ts[1] = ts;
+
+ /* Devfreq shouldn't be faster than initialization, play safe though. */
+ dmc->load = 99;
+ dmc->total = 100;
+}
+
+/**
+ * exynos5_dmc_disable_perf_events() - Disable performance events
+ * @dmc: device for which the counters are going to be checked
+ *
+ * Function which stops and disables the performance event counters and interrupts.
+ */
+static void exynos5_dmc_disable_perf_events(struct exynos5_dmc *dmc)
+{
+ /* Stop all counters */
+ writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /* Disable interrupts for counter 2 */
+ writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENC_PPC);
+ writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENC_PPC);
+
+ /* Disable counter 2 and CCNT */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENC_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENC_PPC);
+
+ /* Clear overflow flag for all counters */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
+}
+
+/**
+ * exynos5_dmc_get_status() - Read current DMC performance statistics.
+ * @dev: device for which the statistics are requested
+ * @stat: structure which has statistic fields
+ *
+ * Function reads the DMC performance counters and calculates 'busy_time'
+ * and 'total_time'. To protect from overflow, the values are shifted right
+ * by 10. After readout, the counters are set up to count again.
+ */
+static int exynos5_dmc_get_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+ unsigned long load, total;
+ int ret;
+
+ if (dmc->in_irq_mode) {
+ stat->current_frequency = dmc->curr_rate;
+ stat->busy_time = dmc->load;
+ stat->total_time = dmc->total;
+ } else {
+ ret = exynos5_counters_get(dmc, &load, &total);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* To protect from overflow, divide by 1024 */
+ stat->busy_time = load >> 10;
+ stat->total_time = total >> 10;
+
+ ret = exynos5_counters_set_event(dmc);
+ if (ret < 0) {
+ dev_err(dev, "could not set event counter\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
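+
+/*
+ * Note on the shift above (illustrative numbers): both values are shifted
+ * by the same amount, so the busy/total ratio seen by the governor is
+ * preserved, e.g. load = 3 << 20 and total = 4 << 20 still yield 75%.
+ */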
+
+/**
+ * exynos5_dmc_get_cur_freq() - Function returns current DMC frequency
+ * @dev: device for which the framework checks operating frequency
+ * @freq: returned frequency value
+ *
+ * It returns the currently used frequency of the DMC. The real operating
+ * frequency might be lower if the clock source rate could not be divided
+ * down exactly to the requested value.
+ */
+static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+
+ mutex_lock(&dmc->lock);
+ *freq = dmc->curr_rate;
+ mutex_unlock(&dmc->lock);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_df_profile - Devfreq device's profile structure
+ *
+ * It provides the devfreq framework with the needed callbacks and the
+ * polling period.
+ */
+static struct devfreq_dev_profile exynos5_dmc_df_profile = {
+ .target = exynos5_dmc_target,
+ .get_dev_status = exynos5_dmc_get_status,
+ .get_cur_freq = exynos5_dmc_get_cur_freq,
+};
+
+/**
+ * exynos5_dmc_align_init_freq() - Align the initial frequency value
+ * @dmc: device for which the frequency is going to be set
+ * @bootloader_init_freq: initial frequency set by the bootloader in kHz
+ *
+ * The initial bootloader frequency, which is present during boot, might be
+ * different from the frequency values supported by the driver. This is
+ * possible due to different PLL settings or a different PLL used as the
+ * source.
+ * This function provides the 'initial_freq' for the devfreq framework
+ * statistics engine, which supports only registered values. Thus, some
+ * alignment must be made.
+ */
+static unsigned long
+exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
+ unsigned long bootloader_init_freq)
+{
+ unsigned long aligned_freq;
+ int idx;
+
+ idx = find_target_freq_idx(dmc, bootloader_init_freq);
+ if (idx >= 0)
+ aligned_freq = dmc->opp[idx].freq_hz;
+ else
+ aligned_freq = dmc->opp[dmc->opp_count - 1].freq_hz;
+
+ return aligned_freq;
+}
+
+/**
+ * create_timings_aligned() - Create register values and align with standard
+ * @dmc: device for which the frequency is going to be set
+ * @reg_timing_row: output register value for the ROW timings
+ * @reg_timing_data: output register value for the DATA timings
+ * @reg_timing_power: output register value for the POWER timings
+ * @clk_period_ps: the period of the clock, known as tCK
+ *
+ * The function calculates the timings and creates register values ready for
+ * a frequency transition. Each register contains a few timings, which are
+ * shifted by a known offset. A timing value is calculated based on the memory
+ * specification: the minimal time required and the minimal number of cycles
+ * required.
+ */
+static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
+ u32 *reg_timing_data, u32 *reg_timing_power,
+ u32 clk_period_ps)
+{
+ u32 val;
+ const struct timing_reg *reg;
+
+ if (clk_period_ps == 0)
+ return -EINVAL;
+
+ *reg_timing_row = 0;
+ *reg_timing_data = 0;
+ *reg_timing_power = 0;
+
+ val = dmc->timings->tRFC / clk_period_ps;
+ val += dmc->timings->tRFC % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRFC);
+ reg = &timing_row[0];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRRD / clk_period_ps;
+ val += dmc->timings->tRRD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRRD);
+ reg = &timing_row[1];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRPab / clk_period_ps;
+ val += dmc->timings->tRPab % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRPab);
+ reg = &timing_row[2];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRCD / clk_period_ps;
+ val += dmc->timings->tRCD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRCD);
+ reg = &timing_row[3];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRC / clk_period_ps;
+ val += dmc->timings->tRC % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRC);
+ reg = &timing_row[4];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRAS / clk_period_ps;
+ val += dmc->timings->tRAS % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRAS);
+ reg = &timing_row[5];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ /* data related timings */
+ val = dmc->timings->tWTR / clk_period_ps;
+ val += dmc->timings->tWTR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWTR);
+ reg = &timing_data[0];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tWR / clk_period_ps;
+ val += dmc->timings->tWR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWR);
+ reg = &timing_data[1];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRTP / clk_period_ps;
+ val += dmc->timings->tRTP % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRTP);
+ reg = &timing_data[2];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tW2W_C2C / clk_period_ps;
+ val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tW2W_C2C);
+ reg = &timing_data[3];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tR2R_C2C / clk_period_ps;
+ val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tR2R_C2C);
+ reg = &timing_data[4];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tWL / clk_period_ps;
+ val += dmc->timings->tWL % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWL);
+ reg = &timing_data[5];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tDQSCK / clk_period_ps;
+ val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tDQSCK);
+ reg = &timing_data[6];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRL / clk_period_ps;
+ val += dmc->timings->tRL % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRL);
+ reg = &timing_data[7];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ /* power related timings */
+ val = dmc->timings->tFAW / clk_period_ps;
+ val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tFAW);
+ reg = &timing_power[0];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tXSR / clk_period_ps;
+ val += dmc->timings->tXSR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tXSR);
+ reg = &timing_power[1];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tXP / clk_period_ps;
+ val += dmc->timings->tXP % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tXP);
+ reg = &timing_power[2];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tCKE / clk_period_ps;
+ val += dmc->timings->tCKE % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tCKE);
+ reg = &timing_power[3];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tMRD / clk_period_ps;
+ val += dmc->timings->tMRD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tMRD);
+ reg = &timing_power[4];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ return 0;
+}
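+
+/*
+ * Each block above computes the same thing: ceil(t / tCK) clamped to the
+ * JEDEC minimum cycle count. A minimal equivalent helper (illustrative
+ * sketch, not used by the driver) could be:
+ */
+#if 0
+static u32 timing_cycles(u32 time_ps, u32 min_cycles, u32 clk_period_ps)
+{
+ return max(DIV_ROUND_UP(time_ps, clk_period_ps), min_cycles);
+}
+#endif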
+
+/**
+ * of_get_dram_timings() - helper function for parsing DT settings for DRAM
+ * @dmc: device for which the frequency is going to be set
+ *
+ * The function parses DT entries with DRAM information.
+ */
+static int of_get_dram_timings(struct exynos5_dmc *dmc)
+{
+ int ret = 0;
+ int idx;
+ struct device_node *np_ddr;
+ u32 freq_mhz, clk_period_ps;
+
+ np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0);
+ if (!np_ddr) {
+ dev_warn(dmc->dev, "could not find 'device-handle' in DT\n");
+ return -EINVAL;
+ }
+
+ dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_row) {
+ of_node_put(np_ddr);
+ return -ENOMEM;
+ }
+
+ dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_data) {
+ of_node_put(np_ddr);
+ return -ENOMEM;
+ }
+
+ dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_power) {
+ of_node_put(np_ddr);
+ return -ENOMEM;
+ }
+
+ dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
+ DDR_TYPE_LPDDR3,
+ &dmc->timings_arr_size);
+ if (!dmc->timings) {
+ of_node_put(np_ddr);
+ dev_warn(dmc->dev, "could not get timings from DT\n");
+ return -EINVAL;
+ }
+
+ dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
+ if (!dmc->min_tck) {
+ of_node_put(np_ddr);
+ dev_warn(dmc->dev, "could not get tck from DT\n");
+ return -EINVAL;
+ }
+
+ /* Sorted array of OPPs with frequency ascending */
+ for (idx = 0; idx < dmc->opp_count; idx++) {
+ freq_mhz = dmc->opp[idx].freq_hz / 1000000;
+ clk_period_ps = 1000000 / freq_mhz;
+
+ ret = create_timings_aligned(dmc, &dmc->timing_row[idx],
+ &dmc->timing_data[idx],
+ &dmc->timing_power[idx],
+ clk_period_ps);
+ }
+
+ of_node_put(np_ddr);
+
+ /* Take the highest frequency's timings as 'bypass' */
+ dmc->bypass_timing_row = dmc->timing_row[idx - 1];
+ dmc->bypass_timing_data = dmc->timing_data[idx - 1];
+ dmc->bypass_timing_power = dmc->timing_power[idx - 1];
+
+ return ret;
+}
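+
+/*
+ * Worked example of the tCK computation above (numbers assumed): for an
+ * 800 MHz OPP, freq_mhz = 800 and clk_period_ps = 1000000 / 800 = 1250,
+ * i.e. a 1.25 ns clock period passed to create_timings_aligned().
+ */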
+
+/**
+ * exynos5_dmc_init_clks() - Initialize clocks needed for DMC operation.
+ * @dmc: DMC structure containing needed fields
+ *
+ * Get the needed clocks defined in the DT, enable them and set the right
+ * parents. Read the current frequency and initialize the initial rate for
+ * the governor.
+ */
+static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
+{
+ int ret;
+ unsigned long target_volt = 0;
+ unsigned long target_rate = 0;
+ unsigned int tmp;
+
+ dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll");
+ if (IS_ERR(dmc->fout_spll))
+ return PTR_ERR(dmc->fout_spll);
+
+ dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll");
+ if (IS_ERR(dmc->fout_bpll))
+ return PTR_ERR(dmc->fout_bpll);
+
+ dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex");
+ if (IS_ERR(dmc->mout_mclk_cdrex))
+ return PTR_ERR(dmc->mout_mclk_cdrex);
+
+ dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll");
+ if (IS_ERR(dmc->mout_bpll))
+ return PTR_ERR(dmc->mout_bpll);
+
+ dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev,
+ "mout_mx_mspll_ccore");
+ if (IS_ERR(dmc->mout_mx_mspll_ccore))
+ return PTR_ERR(dmc->mout_mx_mspll_ccore);
+
+ dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2");
+ if (IS_ERR(dmc->mout_spll)) {
+ dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll");
+ if (IS_ERR(dmc->mout_spll))
+ return PTR_ERR(dmc->mout_spll);
+ }
+
+ /*
+ * Align the initial rate to a supported OPP value and set it for the
+ * governor.
+ */
+ dmc->curr_rate = clk_get_rate(dmc->mout_mclk_cdrex);
+ dmc->curr_rate = exynos5_dmc_align_init_freq(dmc, dmc->curr_rate);
+ exynos5_dmc_df_profile.initial_freq = dmc->curr_rate;
+
+ ret = exynos5_dmc_get_volt_freq(dmc, &dmc->curr_rate, &target_rate,
+ &target_volt, 0);
+ if (ret)
+ return ret;
+
+ dmc->curr_volt = target_volt;
+
+ clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
+
+ dmc->bypass_rate = clk_get_rate(dmc->mout_mx_mspll_ccore);
+
+ clk_prepare_enable(dmc->fout_bpll);
+ clk_prepare_enable(dmc->mout_bpll);
+
+ /*
+ * Some bootloaders do not set clock routes correctly.
+ * Stop one path in clocks to PHY.
+ */
+ regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, &tmp);
+ tmp &= ~(BIT(1) | BIT(0));
+ regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, tmp);
+
+ return 0;
+}
+
+/**
+ * exynos5_performance_counters_init() - Initializes the DMC's performance
+ * counters
+ * @dmc: DMC for which it does the setup
+ *
+ * Initialization of the performance counters in the DMC for estimating usage.
+ * The counters' values are used for calculating the memory bandwidth, and
+ * based on that the governor changes the frequency.
+ * The counters are not used when the governor is GOVERNOR_USERSPACE.
+ */
+static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
+{
+ int ret, i;
+
+ dmc->num_counters = devfreq_event_get_edev_count(dmc->dev);
+ if (dmc->num_counters < 0) {
+ dev_err(dmc->dev, "could not get devfreq-event counters\n");
+ return dmc->num_counters;
+ }
+
+ dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+ sizeof(*dmc->counter), GFP_KERNEL);
+ if (!dmc->counter)
+ return -ENOMEM;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ dmc->counter[i] =
+ devfreq_event_get_edev_by_phandle(dmc->dev, i);
+ if (IS_ERR_OR_NULL(dmc->counter[i]))
+ return -EPROBE_DEFER;
+ }
+
+ ret = exynos5_counters_enable_edev(dmc);
+ if (ret < 0) {
+ dev_err(dmc->dev, "could not enable event counter\n");
+ return ret;
+ }
+
+ ret = exynos5_counters_set_event(dmc);
+ if (ret < 0) {
+ exynos5_counters_disable_edev(dmc);
+ dev_err(dmc->dev, "could not set event counter\n");
+ return ret;
+ }
+
+ return 0;
+}
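+
+/*
+ * Context (assumed, matching the devfreq-event API): the counters come from
+ * a DT phandle list such as
+ * devfreq-events = <&ppmu_dmc0_0>, <&ppmu_dmc0_1>;
+ * devfreq_event_get_edev_count() returns the number of entries and
+ * devfreq_event_get_edev_by_phandle() resolves each one in turn.
+ */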
+
+/**
+ * exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC
+ * @dmc: device which is used for changing this feature
+ *
+ * The DREX DMC must be paused when a divider or mux in the clock tree
+ * changes its configuration. In such a situation, access to the memory is
+ * blocked by the DMC automatically. This feature is used when a clock
+ * frequency change request appears that touches the clock tree.
+ */
+static inline int exynos5_dmc_set_pause_on_switching(struct exynos5_dmc *dmc)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(dmc->clk_regmap, CDREX_PAUSE, &val);
+ if (ret)
+ return ret;
+
+ val |= 1UL;
+ regmap_write(dmc->clk_regmap, CDREX_PAUSE, val);
+
+ return 0;
+}
+
+static irqreturn_t dmc_irq_thread(int irq, void *priv)
+{
+ int res;
+ struct exynos5_dmc *dmc = priv;
+
+ mutex_lock(&dmc->df->lock);
+
+ exynos5_dmc_perf_events_check(dmc);
+
+ res = update_devfreq(dmc->df);
+ if (res)
+ dev_warn(dmc->dev, "devfreq failed with %d\n", res);
+
+ mutex_unlock(&dmc->df->lock);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * exynos5_dmc_probe() - Probe function for the DMC driver
+ * @pdev: platform device for which the driver is going to be initialized
+ *
+ * Initialize the basic components: clocks, regulators, performance counters,
+ * etc. Read out the product version and, based on that information, set up
+ * the internal structures for the controller (frequency and voltage) and for
+ * the DRAM memory parameters: timings for each operating frequency.
+ * Register a new devfreq device for controlling DVFS of the DMC.
+ */
+static int exynos5_dmc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos5_dmc *dmc;
+ struct resource *res;
+ int irq[2];
+
+ dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return -ENOMEM;
+
+ mutex_init(&dmc->lock);
+
+ dmc->dev = dev;
+ platform_set_drvdata(pdev, dmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmc->base_drexi0 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dmc->base_drexi0))
+ return PTR_ERR(dmc->base_drexi0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dmc->base_drexi1 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dmc->base_drexi1))
+ return PTR_ERR(dmc->base_drexi1);
+
+ dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
+ "samsung,syscon-clk");
+ if (IS_ERR(dmc->clk_regmap))
+ return PTR_ERR(dmc->clk_regmap);
+
+ ret = exynos5_init_freq_table(dmc, &exynos5_dmc_df_profile);
+ if (ret) {
+ dev_warn(dev, "couldn't initialize frequency settings\n");
+ return ret;
+ }
+
+ dmc->vdd_mif = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(dmc->vdd_mif))
+ return PTR_ERR(dmc->vdd_mif);
+
+ ret = exynos5_dmc_init_clks(dmc);
+ if (ret)
+ return ret;
+
+ ret = of_get_dram_timings(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't initialize timings settings\n");
+ goto remove_clocks;
+ }
+
+ ret = exynos5_dmc_set_pause_on_switching(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't get access to PAUSE register\n");
+ goto remove_clocks;
+ }
+
+ /* There are two modes in which the driver works: polling or IRQ */
+ irq[0] = platform_get_irq_byname(pdev, "drex_0");
+ irq[1] = platform_get_irq_byname(pdev, "drex_1");
+ if (irq[0] > 0 && irq[1] > 0) {
+ ret = devm_request_threaded_irq(dev, irq[0], NULL,
+ dmc_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), dmc);
+ if (ret) {
+ dev_err(dev, "couldn't grab IRQ\n");
+ goto remove_clocks;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq[1], NULL,
+ dmc_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), dmc);
+ if (ret) {
+ dev_err(dev, "couldn't grab IRQ\n");
+ goto remove_clocks;
+ }
+
+ /*
+ * Setup default thresholds for the devfreq governor.
+ * The values are chosen based on experiments.
+ */
+ dmc->gov_data.upthreshold = 55;
+ dmc->gov_data.downdifferential = 5;
+
+ exynos5_dmc_enable_perf_events(dmc);
+
+ dmc->in_irq_mode = 1;
+ } else {
+ ret = exynos5_performance_counters_init(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't probe performance counters\n");
+ goto remove_clocks;
+ }
+
+ /*
+ * Setup default thresholds for the devfreq governor.
+ * The values are chosen based on experiments.
+ */
+ dmc->gov_data.upthreshold = 30;
+ dmc->gov_data.downdifferential = 5;
+
+ exynos5_dmc_df_profile.polling_ms = 500;
+ }
+
+ dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &dmc->gov_data);
+ if (IS_ERR(dmc->df)) {
+ ret = PTR_ERR(dmc->df);
+ goto err_devfreq_add;
+ }
+
+ if (dmc->in_irq_mode)
+ exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
+
+ dev_info(dev, "DMC initialized\n");
+
+ return 0;
+
+err_devfreq_add:
+ if (dmc->in_irq_mode)
+ exynos5_dmc_disable_perf_events(dmc);
+ else
+ exynos5_counters_disable_edev(dmc);
+remove_clocks:
+ clk_disable_unprepare(dmc->mout_bpll);
+ clk_disable_unprepare(dmc->fout_bpll);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_remove() - Remove function for the platform device
+ * @pdev: platform device which is going to be removed
+ *
+ * The function relies on the 'devm' framework, which automatically cleans
+ * up the device's resources; it only needs to explicitly disable the
+ * performance counters, unprepare the clocks and remove the OPP table.
+ */
+static int exynos5_dmc_remove(struct platform_device *pdev)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev);
+
+ if (dmc->in_irq_mode)
+ exynos5_dmc_disable_perf_events(dmc);
+ else
+ exynos5_counters_disable_edev(dmc);
+
+ clk_disable_unprepare(dmc->mout_bpll);
+ clk_disable_unprepare(dmc->fout_bpll);
+
+ dev_pm_opp_remove_table(dmc->dev);
+
+ return 0;
+}
+
+static const struct of_device_id exynos5_dmc_of_match[] = {
+ { .compatible = "samsung,exynos5422-dmc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match);
+
+static struct platform_driver exynos5_dmc_platdrv = {
+ .probe = exynos5_dmc_probe,
+ .remove = exynos5_dmc_remove,
+ .driver = {
+ .name = "exynos5-dmc",
+ .of_match_table = exynos5_dmc_of_match,
+ },
+};
+module_platform_driver(exynos5_dmc_platdrv);
+MODULE_DESCRIPTION("Driver for Exynos5422 Dynamic Memory Controller dynamic frequency and voltage change");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lukasz Luba");
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 4680124ddcab..fbfbaada61a2 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -17,6 +17,16 @@ config TEGRA20_EMC
This driver is required to change memory timings / clock rate for
external memory.
+config TEGRA30_EMC
+ bool "NVIDIA Tegra30 External Memory Controller driver"
+ default y
+ depends on TEGRA_MC && ARCH_TEGRA_3x_SOC
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra30 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
config TEGRA124_EMC
bool "NVIDIA Tegra124 External Memory Controller driver"
default y
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 3971a6b7c487..3d23c4261104 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -11,5 +11,6 @@ tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
+obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 3d8d322511c5..ec8403557ed4 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,39 +19,6 @@
#include "mc.h"
-#define MC_INTSTATUS 0x000
-
-#define MC_INTMASK 0x004
-
-#define MC_ERR_STATUS 0x08
-#define MC_ERR_STATUS_TYPE_SHIFT 28
-#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (6 << MC_ERR_STATUS_TYPE_SHIFT)
-#define MC_ERR_STATUS_TYPE_MASK (0x7 << MC_ERR_STATUS_TYPE_SHIFT)
-#define MC_ERR_STATUS_READABLE (1 << 27)
-#define MC_ERR_STATUS_WRITABLE (1 << 26)
-#define MC_ERR_STATUS_NONSECURE (1 << 25)
-#define MC_ERR_STATUS_ADR_HI_SHIFT 20
-#define MC_ERR_STATUS_ADR_HI_MASK 0x3
-#define MC_ERR_STATUS_SECURITY (1 << 17)
-#define MC_ERR_STATUS_RW (1 << 16)
-
-#define MC_ERR_ADR 0x0c
-
-#define MC_GART_ERROR_REQ 0x30
-#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
-#define MC_SECURITY_VIOLATION_STATUS 0x74
-
-#define MC_EMEM_ARB_CFG 0x90
-#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) (((x) & 0x1ff) << 0)
-#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
-#define MC_EMEM_ARB_MISC0 0xd8
-
-#define MC_EMEM_ADR_CFG 0x54
-#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
-
-#define MC_TIMING_CONTROL 0xfc
-#define MC_TIMING_UPDATE BIT(0)
-
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
@@ -307,7 +275,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
return 0;
}
-void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
+int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
{
unsigned int i;
struct tegra_mc_timing *timing = NULL;
@@ -322,11 +290,13 @@ void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
if (!timing) {
dev_err(mc->dev, "no memory timing registered for rate %lu\n",
rate);
- return;
+ return -EINVAL;
}
for (i = 0; i < mc->soc->num_emem_regs; ++i)
mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
+
+ return 0;
}
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
@@ -626,6 +596,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
struct resource *res;
struct tegra_mc *mc;
void *isr;
+ u64 mask;
int err;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
@@ -637,6 +608,14 @@ static int tegra_mc_probe(struct platform_device *pdev)
mc->soc = of_device_get_match_data(&pdev->dev);
mc->dev = &pdev->dev;
+ mask = DMA_BIT_MASK(mc->soc->num_address_bits);
+
+ err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
/* length of MC tick in nanoseconds */
mc->tick = 30;
@@ -658,6 +637,9 @@ static int tegra_mc_probe(struct platform_device *pdev)
} else
#endif
{
+ /* ensure that debug features are disabled */
+ mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
+
err = tegra_mc_setup_latency_allowance(mc);
if (err < 0) {
dev_err(&pdev->dev,
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index f9353494b708..957c6eb74ff9 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -6,20 +6,76 @@
#ifndef MEMORY_TEGRA_MC_H
#define MEMORY_TEGRA_MC_H
+#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>
#include <soc/tegra/mc.h>
-#define MC_INT_DECERR_MTS (1 << 16)
-#define MC_INT_SECERR_SEC (1 << 13)
-#define MC_INT_DECERR_VPR (1 << 12)
-#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
-#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
-#define MC_INT_ARBITRATION_EMEM (1 << 9)
-#define MC_INT_SECURITY_VIOLATION (1 << 8)
-#define MC_INT_INVALID_GART_PAGE (1 << 7)
-#define MC_INT_DECERR_EMEM (1 << 6)
+#define MC_INTSTATUS 0x00
+#define MC_INTMASK 0x04
+#define MC_ERR_STATUS 0x08
+#define MC_ERR_ADR 0x0c
+#define MC_GART_ERROR_REQ 0x30
+#define MC_EMEM_ADR_CFG 0x54
+#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
+#define MC_SECURITY_VIOLATION_STATUS 0x74
+#define MC_EMEM_ARB_CFG 0x90
+#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
+#define MC_EMEM_ARB_TIMING_RCD 0x98
+#define MC_EMEM_ARB_TIMING_RP 0x9c
+#define MC_EMEM_ARB_TIMING_RC 0xa0
+#define MC_EMEM_ARB_TIMING_RAS 0xa4
+#define MC_EMEM_ARB_TIMING_FAW 0xa8
+#define MC_EMEM_ARB_TIMING_RRD 0xac
+#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
+#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
+#define MC_EMEM_ARB_TIMING_R2R 0xb8
+#define MC_EMEM_ARB_TIMING_W2W 0xbc
+#define MC_EMEM_ARB_TIMING_R2W 0xc0
+#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_DA_TURNS 0xd0
+#define MC_EMEM_ARB_DA_COVERS 0xd4
+#define MC_EMEM_ARB_MISC0 0xd8
+#define MC_EMEM_ARB_MISC1 0xdc
+#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
+#define MC_EMEM_ARB_OVERRIDE 0xe8
+#define MC_TIMING_CONTROL_DBG 0xf8
+#define MC_TIMING_CONTROL 0xfc
+
+#define MC_INT_DECERR_MTS BIT(16)
+#define MC_INT_SECERR_SEC BIT(13)
+#define MC_INT_DECERR_VPR BIT(12)
+#define MC_INT_INVALID_APB_ASID_UPDATE BIT(11)
+#define MC_INT_INVALID_SMMU_PAGE BIT(10)
+#define MC_INT_ARBITRATION_EMEM BIT(9)
+#define MC_INT_SECURITY_VIOLATION BIT(8)
+#define MC_INT_INVALID_GART_PAGE BIT(7)
+#define MC_INT_DECERR_EMEM BIT(6)
+
+#define MC_ERR_STATUS_TYPE_SHIFT 28
+#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (0x6 << MC_ERR_STATUS_TYPE_SHIFT)
+#define MC_ERR_STATUS_TYPE_MASK (0x7 << MC_ERR_STATUS_TYPE_SHIFT)
+#define MC_ERR_STATUS_READABLE BIT(27)
+#define MC_ERR_STATUS_WRITABLE BIT(26)
+#define MC_ERR_STATUS_NONSECURE BIT(25)
+#define MC_ERR_STATUS_ADR_HI_SHIFT 20
+#define MC_ERR_STATUS_ADR_HI_MASK 0x3
+#define MC_ERR_STATUS_SECURITY BIT(17)
+#define MC_ERR_STATUS_RW BIT(16)
+
+#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
+
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) ((x) & 0x1ff)
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
+
+#define MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK 0x1ff
+#define MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE BIT(30)
+#define MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE BIT(31)
+
+#define MC_EMEM_ARB_OVERRIDE_EACK_MASK 0x3
+
+#define MC_TIMING_UPDATE BIT(0)
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index ac8351b5beeb..48ef01c3ff90 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -909,16 +909,18 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
{ .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
};
-static const unsigned int tegra114_group_display[] = {
+static const unsigned int tegra114_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
};
static const struct tegra_smmu_group_soc tegra114_groups[] = {
{
- .name = "display",
- .swgroups = tegra114_group_display,
- .num_swgroups = ARRAY_SIZE(tegra114_group_display),
+ .name = "drm",
+ .swgroups = tegra114_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra114_group_drm),
},
};
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 5d0ccb2be206..493b5dc3a4b3 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -10,26 +10,6 @@
#include "mc.h"
-#define MC_EMEM_ARB_CFG 0x90
-#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
-#define MC_EMEM_ARB_TIMING_RCD 0x98
-#define MC_EMEM_ARB_TIMING_RP 0x9c
-#define MC_EMEM_ARB_TIMING_RC 0xa0
-#define MC_EMEM_ARB_TIMING_RAS 0xa4
-#define MC_EMEM_ARB_TIMING_FAW 0xa8
-#define MC_EMEM_ARB_TIMING_RRD 0xac
-#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
-#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
-#define MC_EMEM_ARB_TIMING_R2R 0xb8
-#define MC_EMEM_ARB_TIMING_W2W 0xbc
-#define MC_EMEM_ARB_TIMING_R2W 0xc0
-#define MC_EMEM_ARB_TIMING_W2R 0xc4
-#define MC_EMEM_ARB_DA_TURNS 0xd0
-#define MC_EMEM_ARB_DA_COVERS 0xd4
-#define MC_EMEM_ARB_MISC0 0xd8
-#define MC_EMEM_ARB_MISC1 0xdc
-#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
-
static const struct tegra_mc_client tegra124_mc_clients[] = {
{
.id = 0x00,
@@ -974,16 +954,18 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
{ .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
};
-static const unsigned int tegra124_group_display[] = {
+static const unsigned int tegra124_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_GPU,
+ TEGRA_SWGROUP_VIC,
};
static const struct tegra_smmu_group_soc tegra124_groups[] = {
{
- .name = "display",
- .swgroups = tegra124_group_display,
- .num_swgroups = ARRAY_SIZE(tegra124_group_display),
+ .name = "drm",
+ .swgroups = tegra124_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra124_group_drm),
},
};
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 9ee5bef49e47..1b23b1c34476 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -6,10 +6,11 @@
*/
#include <linux/clk.h>
+#include <linux/clk/tegra.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/interrupt.h>
-#include <linux/iopoll.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,6 +22,7 @@
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
@@ -79,6 +81,12 @@
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_READ_DQM_CTRL BIT(9)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -137,9 +145,6 @@ struct tegra_emc {
struct device *dev;
struct completion clk_handshake_complete;
struct notifier_block clk_nb;
- struct clk *backup_clk;
- struct clk *emc_mux;
- struct clk *pll_m;
struct clk *clk;
void __iomem *regs;
@@ -219,7 +224,7 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
- long timeout;
+ unsigned long timeout;
dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
@@ -231,14 +236,10 @@ static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
}
timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- usecs_to_jiffies(100));
+ msecs_to_jiffies(100));
if (timeout == 0) {
dev_err(emc->dev, "EMC-CAR handshake failed\n");
return -EIO;
- } else if (timeout < 0) {
- dev_err(emc->dev, "failed to wait for EMC-CAR handshake: %ld\n",
- timeout);
- return timeout;
}
return 0;
@@ -363,6 +364,13 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
NULL);
+ dev_info(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
return 0;
}
@@ -398,7 +406,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
- u32 emc_cfg;
+ u32 emc_cfg, emc_dbg;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -421,42 +429,53 @@ static int emc_setup_hw(struct tegra_emc *emc)
writel_relaxed(intmask, emc->regs + EMC_INTMASK);
writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
return 0;
}
-static int emc_init(struct tegra_emc *emc, unsigned long rate)
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
{
- int err;
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
- err = clk_set_parent(emc->emc_mux, emc->backup_clk);
- if (err) {
- dev_err(emc->dev,
- "failed to reparent to backup source: %d\n", err);
- return err;
- }
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
- err = clk_set_rate(emc->pll_m, rate);
- if (err) {
- dev_err(emc->dev,
- "failed to change pll_m rate: %d\n", err);
- return err;
- }
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
- err = clk_set_parent(emc->emc_mux, emc->pll_m);
- if (err) {
- dev_err(emc->dev,
- "failed to reparent to pll_m: %d\n", err);
- return err;
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
}
- err = clk_set_rate(emc->clk, rate);
- if (err) {
- dev_err(emc->dev,
- "failed to change emc rate: %d\n", err);
- return err;
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
}
- return 0;
+ return timing->rate;
}
static int tegra_emc_probe(struct platform_device *pdev)
@@ -515,57 +534,26 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
}
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
emc->clk = devm_clk_get(&pdev->dev, "emc");
if (IS_ERR(emc->clk)) {
err = PTR_ERR(emc->clk);
dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- return err;
- }
-
- emc->pll_m = clk_get_sys(NULL, "pll_m");
- if (IS_ERR(emc->pll_m)) {
- err = PTR_ERR(emc->pll_m);
- dev_err(&pdev->dev, "failed to get pll_m clock: %d\n", err);
- return err;
- }
-
- emc->backup_clk = clk_get_sys(NULL, "pll_p");
- if (IS_ERR(emc->backup_clk)) {
- err = PTR_ERR(emc->backup_clk);
- dev_err(&pdev->dev, "failed to get pll_p clock: %d\n", err);
- goto put_pll_m;
- }
-
- emc->emc_mux = clk_get_parent(emc->clk);
- if (IS_ERR(emc->emc_mux)) {
- err = PTR_ERR(emc->emc_mux);
- dev_err(&pdev->dev, "failed to get emc_mux clock: %d\n", err);
- goto put_backup;
+ goto unset_cb;
}
err = clk_notifier_register(emc->clk, &emc->clk_nb);
if (err) {
dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
err);
- goto put_backup;
- }
-
- /* set DRAM clock rate to maximum */
- err = emc_init(emc, emc->timings[emc->num_timings - 1].rate);
- if (err) {
- dev_err(&pdev->dev, "failed to initialize EMC clock rate: %d\n",
- err);
- goto unreg_notifier;
+ goto unset_cb;
}
return 0;
-unreg_notifier:
- clk_notifier_unregister(emc->clk, &emc->clk_nb);
-put_backup:
- clk_put(emc->backup_clk);
-put_pll_m:
- clk_put(emc->pll_m);
+unset_cb:
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
return err;
}
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
new file mode 100644
index 000000000000..0b6a5e451ea3
--- /dev/null
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Tegra30 External Memory Controller driver
+ *
+ * Based on downstream driver from NVIDIA and tegra124-emc.c
+ * Copyright (C) 2011-2014 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "mc.h"
+
+#define EMC_INTSTATUS 0x000
+#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
+#define EMC_CFG 0x00c
+#define EMC_REFCTRL 0x020
+#define EMC_TIMING_CONTROL 0x028
+#define EMC_RC 0x02c
+#define EMC_RFC 0x030
+#define EMC_RAS 0x034
+#define EMC_RP 0x038
+#define EMC_R2W 0x03c
+#define EMC_W2R 0x040
+#define EMC_R2P 0x044
+#define EMC_W2P 0x048
+#define EMC_RD_RCD 0x04c
+#define EMC_WR_RCD 0x050
+#define EMC_RRD 0x054
+#define EMC_REXT 0x058
+#define EMC_WDV 0x05c
+#define EMC_QUSE 0x060
+#define EMC_QRST 0x064
+#define EMC_QSAFE 0x068
+#define EMC_RDV 0x06c
+#define EMC_REFRESH 0x070
+#define EMC_BURST_REFRESH_NUM 0x074
+#define EMC_PDEX2WR 0x078
+#define EMC_PDEX2RD 0x07c
+#define EMC_PCHG2PDEN 0x080
+#define EMC_ACT2PDEN 0x084
+#define EMC_AR2PDEN 0x088
+#define EMC_RW2PDEN 0x08c
+#define EMC_TXSR 0x090
+#define EMC_TCKE 0x094
+#define EMC_TFAW 0x098
+#define EMC_TRPAB 0x09c
+#define EMC_TCLKSTABLE 0x0a0
+#define EMC_TCLKSTOP 0x0a4
+#define EMC_TREFBW 0x0a8
+#define EMC_QUSE_EXTRA 0x0ac
+#define EMC_ODT_WRITE 0x0b0
+#define EMC_ODT_READ 0x0b4
+#define EMC_WEXT 0x0b8
+#define EMC_CTT 0x0bc
+#define EMC_MRS_WAIT_CNT 0x0c8
+#define EMC_MRS 0x0cc
+#define EMC_EMRS 0x0d0
+#define EMC_SELF_REF 0x0e0
+#define EMC_MRW 0x0e8
+#define EMC_XM2DQSPADCTRL3 0x0f8
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG6 0x114
+#define EMC_CFG_RSV 0x120
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_STATUS 0x2b4
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2QUSEPADCTRL 0x318
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE 0x3c8
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+
+#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+
+#define EMC_MODE_SET_DLL_RESET BIT(8)
+#define EMC_MODE_SET_LONG_CNT BIT(26)
+
+#define EMC_SELF_REF_CMD_ENABLED BIT(0)
+
+#define DRAM_DEV_SEL_ALL (0 << 30)
+#define DRAM_DEV_SEL_0 (2 << 30)
+#define DRAM_DEV_SEL_1 (1 << 30)
+#define DRAM_BROADCAST(num) \
+ ((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
+
+#define EMC_ZQ_CAL_CMD BIT(0)
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
+#define EMC_CFG5_QUSE_MODE_SHIFT 13
+#define EMC_CFG5_QUSE_MODE_MASK (7 << EMC_CFG5_QUSE_MODE_SHIFT)
+
+#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK 2
+#define EMC_CFG5_QUSE_MODE_PULSE_INTERN 3
+
+#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE BIT(9)
+
+#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE BIT(10)
+
+#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE BIT(4)
+
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
+#define EMC_XM2DQSPADCTRL3_VREF_ENABLE BIT(5)
+
+#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
+
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK 0x3ff
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_REFCTRL_DEV_SEL_MASK 0x3
+#define EMC_REFCTRL_ENABLE BIT(31)
+#define EMC_REFCTRL_ENABLE_ALL(num) \
+ (((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
+#define EMC_REFCTRL_DISABLE_ALL(num) ((num) > 1 ? 0 : 2)
+
+#define EMC_CFG_PERIODIC_QRST BIT(21)
+#define EMC_CFG_DYN_SREF_ENABLE BIT(28)
+
+#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
+#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
+#define EMC_CLKCHANGE_SR_ENABLE BIT(2)
+
+#define EMC_TIMING_UPDATE BIT(0)
+
+#define EMC_REFRESH_OVERFLOW_INT BIT(3)
+#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+
+enum emc_dram_type {
+ DRAM_TYPE_DDR3,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
+enum emc_dll_change {
+ DLL_CHANGE_NONE,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF
+};
+
+static const u16 emc_timing_registers[] = {
+ [0] = EMC_RC,
+ [1] = EMC_RFC,
+ [2] = EMC_RAS,
+ [3] = EMC_RP,
+ [4] = EMC_R2W,
+ [5] = EMC_W2R,
+ [6] = EMC_R2P,
+ [7] = EMC_W2P,
+ [8] = EMC_RD_RCD,
+ [9] = EMC_WR_RCD,
+ [10] = EMC_RRD,
+ [11] = EMC_REXT,
+ [12] = EMC_WEXT,
+ [13] = EMC_WDV,
+ [14] = EMC_QUSE,
+ [15] = EMC_QRST,
+ [16] = EMC_QSAFE,
+ [17] = EMC_RDV,
+ [18] = EMC_REFRESH,
+ [19] = EMC_BURST_REFRESH_NUM,
+ [20] = EMC_PRE_REFRESH_REQ_CNT,
+ [21] = EMC_PDEX2WR,
+ [22] = EMC_PDEX2RD,
+ [23] = EMC_PCHG2PDEN,
+ [24] = EMC_ACT2PDEN,
+ [25] = EMC_AR2PDEN,
+ [26] = EMC_RW2PDEN,
+ [27] = EMC_TXSR,
+ [28] = EMC_TXSRDLL,
+ [29] = EMC_TCKE,
+ [30] = EMC_TFAW,
+ [31] = EMC_TRPAB,
+ [32] = EMC_TCLKSTABLE,
+ [33] = EMC_TCLKSTOP,
+ [34] = EMC_TREFBW,
+ [35] = EMC_QUSE_EXTRA,
+ [36] = EMC_FBIO_CFG6,
+ [37] = EMC_ODT_WRITE,
+ [38] = EMC_ODT_READ,
+ [39] = EMC_FBIO_CFG5,
+ [40] = EMC_CFG_DIG_DLL,
+ [41] = EMC_CFG_DIG_DLL_PERIOD,
+ [42] = EMC_DLL_XFORM_DQS0,
+ [43] = EMC_DLL_XFORM_DQS1,
+ [44] = EMC_DLL_XFORM_DQS2,
+ [45] = EMC_DLL_XFORM_DQS3,
+ [46] = EMC_DLL_XFORM_DQS4,
+ [47] = EMC_DLL_XFORM_DQS5,
+ [48] = EMC_DLL_XFORM_DQS6,
+ [49] = EMC_DLL_XFORM_DQS7,
+ [50] = EMC_DLL_XFORM_QUSE0,
+ [51] = EMC_DLL_XFORM_QUSE1,
+ [52] = EMC_DLL_XFORM_QUSE2,
+ [53] = EMC_DLL_XFORM_QUSE3,
+ [54] = EMC_DLL_XFORM_QUSE4,
+ [55] = EMC_DLL_XFORM_QUSE5,
+ [56] = EMC_DLL_XFORM_QUSE6,
+ [57] = EMC_DLL_XFORM_QUSE7,
+ [58] = EMC_DLI_TRIM_TXDQS0,
+ [59] = EMC_DLI_TRIM_TXDQS1,
+ [60] = EMC_DLI_TRIM_TXDQS2,
+ [61] = EMC_DLI_TRIM_TXDQS3,
+ [62] = EMC_DLI_TRIM_TXDQS4,
+ [63] = EMC_DLI_TRIM_TXDQS5,
+ [64] = EMC_DLI_TRIM_TXDQS6,
+ [65] = EMC_DLI_TRIM_TXDQS7,
+ [66] = EMC_DLL_XFORM_DQ0,
+ [67] = EMC_DLL_XFORM_DQ1,
+ [68] = EMC_DLL_XFORM_DQ2,
+ [69] = EMC_DLL_XFORM_DQ3,
+ [70] = EMC_XM2CMDPADCTRL,
+ [71] = EMC_XM2DQSPADCTRL2,
+ [72] = EMC_XM2DQPADCTRL2,
+ [73] = EMC_XM2CLKPADCTRL,
+ [74] = EMC_XM2COMPPADCTRL,
+ [75] = EMC_XM2VTTGENPADCTRL,
+ [76] = EMC_XM2VTTGENPADCTRL2,
+ [77] = EMC_XM2QUSEPADCTRL,
+ [78] = EMC_XM2DQSPADCTRL3,
+ [79] = EMC_CTT_TERM_CTRL,
+ [80] = EMC_ZCAL_INTERVAL,
+ [81] = EMC_ZCAL_WAIT_CNT,
+ [82] = EMC_MRS_WAIT_CNT,
+ [83] = EMC_AUTO_CAL_CONFIG,
+ [84] = EMC_CTT,
+ [85] = EMC_CTT_DURATION,
+ [86] = EMC_DYN_SELF_REF_CONTROL,
+ [87] = EMC_FBIO_SPARE,
+ [88] = EMC_CFG_RSV,
+};
+
+struct emc_timing {
+ unsigned long rate;
+
+ u32 data[ARRAY_SIZE(emc_timing_registers)];
+
+ u32 emc_auto_cal_interval;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+ u32 emc_zcal_cnt_long;
+ bool emc_cfg_periodic_qrst;
+ bool emc_cfg_dyn_self_ref;
+};
+
+struct tegra_emc {
+ struct device *dev;
+ struct tegra_mc *mc;
+ struct completion clk_handshake_complete;
+ struct notifier_block clk_nb;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int irq;
+
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ u32 mc_override;
+ u32 emc_cfg;
+
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+
+ bool vref_cal_toggle : 1;
+ bool zcal_long : 1;
+ bool dll_on : 1;
+ bool prepared : 1;
+ bool bad_state : 1;
+};
+
+static irqreturn_t tegra_emc_isr(int irq, void *data)
+{
+ struct tegra_emc *emc = data;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 status;
+
+ status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ /* notify about EMC-CAR handshake completion */
+ if (status & EMC_CLKCHANGE_COMPLETE_INT)
+ complete(&emc->clk_handshake_complete);
+
+ /* notify about HW problem */
+ if (status & EMC_REFRESH_OVERFLOW_INT)
+ dev_err_ratelimited(emc->dev,
+ "refresh request overflow timeout\n");
+
+ /* clear interrupts */
+ writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
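+
+/*
+ * Illustrative pairing (assumed, mirroring the Tegra20 driver earlier in
+ * this series): the clock-change path blocks on the completion signalled
+ * above until the EMC-CAR handshake finishes or times out.
+ */
+#if 0
+ unsigned long timeout;
+
+ timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
+ msecs_to_jiffies(100));
+ if (timeout == 0)
+ return -EIO; /* EMC-CAR handshake failed */
+#endif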
+
+static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate >= rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
+ bool *schmitt_to_vref)
+{
+ bool preset = false;
+ u32 val;
+
+ if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);
+
+ if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);
+
+ if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);
+
+ if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
+ val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);
+
+ *schmitt_to_vref = true;
+ preset = true;
+ }
+ }
+
+ return preset;
+}
+
+static int emc_seq_update_timing(struct tegra_emc *emc)
+{
+ u32 val;
+ int err;
+
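+	/*
+	 * Latch the shadow registers into the active register set and wait
+	 * for the EMC to de-assert the timing-update-stalled status.
+	 */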
+ writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
+ !(val & EMC_STATUS_TIMING_UPDATE_STALLED),
+ 1, 200);
+ if (err) {
+ dev_err(emc->dev, "failed to update timing: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int misc0_index = 16;
+ unsigned int i;
+ bool same;
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (mc->timings[i].rate != rate)
+ continue;
+
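+		/*
+		 * Bit 27 of the MC_EMEM_ARB_MISC0 value encodes whether EMC
+		 * and MC are meant to run at the same frequency for this
+		 * rate.
+		 */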
+ if (mc->timings[i].emem_data[misc0_index] & BIT(27))
+ same = true;
+ else
+ same = false;
+
+ return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
+ }
+
+ return -EINVAL;
+}
+
+static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ enum emc_dll_change dll_change;
+ enum emc_dram_type dram_type;
+ bool schmitt_to_vref = false;
+ unsigned int pre_wait = 0;
+ bool qrst_used = false;
+ unsigned int dram_num;
+ unsigned int i;
+ u32 fbio_cfg5;
+ u32 emc_dbg;
+ u32 val;
+ int err;
+
+ if (!timing || emc->bad_state)
+ return -EINVAL;
+
+ dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
+ __func__, timing->rate, rate);
+
+ emc->bad_state = true;
+
+ err = emc_prepare_mc_clk_cfg(emc, rate);
+ if (err) {
+ dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = false;
+ emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+ emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+
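+	/*
+	 * Bit 0 of the mode-register-1 value tracks the DLL enable state,
+	 * so comparing it with the current state tells whether the DLL has
+	 * to be turned on or off during this clock change.
+	 */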
+ if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
+ dll_change = DLL_CHANGE_NONE;
+ else if (timing->emc_mode_1 & 0x1)
+ dll_change = DLL_CHANGE_ON;
+ else
+ dll_change = DLL_CHANGE_OFF;
+
+ emc->dll_on = !!(timing->emc_mode_1 & 0x1);
+
+ if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
+ emc->zcal_long = true;
+ else
+ emc->zcal_long = false;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+
+ /* disable dynamic self-refresh */
+ if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
+ emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+
+ pre_wait = 5;
+ }
+
+ /* update MC arbiter settings */
+ val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
+ if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
+ ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {
+
+ val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
+ MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
+ mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
+ mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+ }
+
+ if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
+ mc_writel(emc->mc,
+ emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
+ MC_EMEM_ARB_OVERRIDE);
+
+ /* check DQ/DQS VREF delay */
+ if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
+ if (pre_wait < 3)
+ pre_wait = 3;
+ }
+
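+	/*
+	 * Flush the configuration changes made above and give the pads a
+	 * few microseconds to settle before reprogramming the timings.
+	 */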
+ if (pre_wait) {
+ err = emc_seq_update_timing(emc);
+ if (err)
+ return err;
+
+ udelay(pre_wait);
+ }
+
+ /* disable auto-calibration if VREF mode is switching */
+ if (timing->emc_auto_cal_interval) {
+ val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
+ val ^= timing->data[74];
+
+ if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
+ writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ err = readl_relaxed_poll_timeout_atomic(
+ emc->regs + EMC_AUTO_CAL_STATUS, val,
+ !(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
+ if (err) {
+ dev_err(emc->dev,
+ "failed to disable auto-cal: %d\n",
+ err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = true;
+ }
+ }
+
+ /* program shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
+ /* EMC_XM2CLKPADCTRL should be programmed separately */
+ if (i != 73)
+ writel_relaxed(timing->data[i],
+ emc->regs + emc_timing_registers[i]);
+ }
+
+ err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
+ if (err)
+ return err;
+
+ /* DDR3: predict MRS long wait count */
+ if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
+ u32 cnt = 512;
+
+ if (emc->zcal_long)
+ cnt -= dram_num * 256;
+
+ val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
+ if (cnt < val)
+ cnt = val;
+
+ val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+ val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
+ EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
+ }
+
+ /* disable interrupt since read access is prohibited after stalling */
+ disable_irq(emc->irq);
+
+ /* this read also completes the writes */
+ val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);
+
+ if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
+ u32 cur_mode, new_mode;
+
+ cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
+ cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
+ new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
+ (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
+ qrst_used = true;
+ }
+
+ /* flow control marker 1 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);
+
+ /* enable periodic reset */
+ if (qrst_used) {
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ emc->regs + EMC_DBG);
+ writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
+ emc->regs + EMC_CFG);
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ }
+
+ /* disable auto-refresh to save time after clock change */
+ writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* turn off DLL and enter self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (dll_change == DLL_CHANGE_OFF)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ writel_relaxed(DRAM_BROADCAST(dram_num) |
+ EMC_SELF_REF_CMD_ENABLED,
+ emc->regs + EMC_SELF_REF);
+ }
+
+ /* flow control marker 2 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
+
+ /* enable write-active MUX, update unshadowed pad control */
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
+ writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);
+
+ /* restore periodic QRST and disable write-active MUX */
+ val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
+ if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
+ if (timing->emc_cfg_periodic_qrst)
+ emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
+ else
+ emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;
+
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ /* exit self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3)
+ writel_relaxed(DRAM_BROADCAST(dram_num),
+ emc->regs + EMC_SELF_REF);
+
+ /* set DRAM-mode registers */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_reset != emc->emc_mode_reset ||
+ dll_change == DLL_CHANGE_ON) {
+ val = timing->emc_mode_reset;
+ if (dll_change == DLL_CHANGE_ON) {
+ val |= EMC_MODE_SET_DLL_RESET;
+ val |= EMC_MODE_SET_LONG_CNT;
+ } else {
+ val &= ~EMC_MODE_SET_DLL_RESET;
+ }
+ writel_relaxed(val, emc->regs + EMC_MRS);
+ }
+ } else {
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_MRW);
+
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_MRW);
+ }
+
+ emc->emc_mode_1 = timing->emc_mode_1;
+ emc->emc_mode_2 = timing->emc_mode_2;
+ emc->emc_mode_reset = timing->emc_mode_reset;
+
+ /* issue ZCAL command if turning ZCAL on */
+ if (emc->zcal_long) {
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
+ emc->regs + EMC_ZQ_CAL);
+
+ if (dram_num > 1)
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
+ emc->regs + EMC_ZQ_CAL);
+ }
+
+ /* re-enable auto-refresh */
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* flow control marker 3 */
+ writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);
+
+ reinit_completion(&emc->clk_handshake_complete);
+
+ /* interrupt can be re-enabled now */
+ enable_irq(emc->irq);
+
+ emc->bad_state = false;
+ emc->prepared = true;
+
+ return 0;
+}
+
+static int emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned long timeout;
+ int ret;
+
+ timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
+ msecs_to_jiffies(100));
+ if (timeout == 0) {
+		dev_err(emc->dev, "EMC-CAR handshake timed out\n");
+ emc->bad_state = true;
+ return -EIO;
+ }
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ udelay(2);
+ /* update restored timing */
+ ret = emc_seq_update_timing(emc);
+ if (ret)
+ emc->bad_state = true;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ emc->prepared = false;
+
+ return ret;
+}
+
+static int emc_unprepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ if (emc->prepared && !emc->bad_state) {
+ /* shouldn't ever happen in practice */
+ dev_err(emc->dev, "timing configuration can't be reverted\n");
+ emc->bad_state = true;
+ }
+
+ return 0;
+}
+
+static int emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
+ struct clk_notifier_data *cnd = data;
+ int err;
+
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ err = emc_prepare_timing_change(emc, cnd->new_rate);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = emc_unprepare_timing_change(emc, cnd->old_rate);
+ break;
+
+ case POST_RATE_CHANGE:
+ err = emc_complete_timing_change(emc, cnd->new_rate);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(node, "clock-frequency", &value);
+ if (err) {
+ dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ timing->rate = value;
+
+ err = of_property_read_u32_array(node, "nvidia,emc-configuration",
+ timing->data,
+ ARRAY_SIZE(emc_timing_registers));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOF: failed to read emc timing data: %d\n",
+ node, err);
+ return err;
+ }
+
+#define EMC_READ_BOOL(prop, dtprop) \
+ timing->prop = of_property_read_bool(node, dtprop);
+
+#define EMC_READ_U32(prop, dtprop) \
+ err = of_property_read_u32(node, dtprop, &timing->prop); \
+ if (err) { \
+ dev_err(emc->dev, \
+ "timing %pOFn: failed to read " #prop ": %d\n", \
+ node, err); \
+ return err; \
+ }
+
+ EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
+ EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
+ EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
+ EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
+ EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
+ EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
+ EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")
+
+#undef EMC_READ_U32
+#undef EMC_READ_BOOL
+
+ dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+
+ if (a->rate > b->rate)
+ return 1;
+
+ return 0;
+}
+
+static int emc_check_mc_timings(struct tegra_emc *emc)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int i;
+
+ if (emc->num_timings != mc->num_timings) {
+ dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
+ emc->num_timings, mc->num_timings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (emc->timings[i].rate != mc->timings[i].rate) {
+ dev_err(emc->dev,
+ "emc/mc timing rate mismatch: %lu %lu\n",
+ emc->timings[i].rate, mc->timings[i].rate);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ struct device_node *child;
+ struct emc_timing *timing;
+ int child_count;
+ int err;
+
+ child_count = of_get_child_count(node);
+ if (!child_count) {
+ dev_err(emc->dev, "no memory timings in: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ emc->num_timings = child_count;
+ timing = emc->timings;
+
+ for_each_child_of_node(node, child) {
+ err = load_one_timing_from_dt(emc, timing++, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ err = emc_check_mc_timings(emc);
+ if (err)
+ return err;
+
+ dev_info(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
+ return 0;
+}
+
+static struct device_node *emc_find_node_by_ram_code(struct device *dev)
+{
+ struct device_node *np;
+ u32 value, ram_code;
+ int err;
+
+ ram_code = tegra_read_ram_code();
+
+ for_each_child_of_node(dev->of_node, np) {
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || value != ram_code)
+ continue;
+
+ return np;
+ }
+
+ dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
+ ram_code);
+
+ return NULL;
+}
+
+static int emc_setup_hw(struct tegra_emc *emc)
+{
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 fbio_cfg5, emc_cfg, emc_dbg;
+ enum emc_dram_type dram_type;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
+
+ /* enable EMC and CAR to handshake on PLL divider/source changes */
+ emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
+
+	/* configure clock change mode according to the DRAM type */
+ switch (dram_type) {
+ case DRAM_TYPE_LPDDR2:
+ emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ break;
+
+ default:
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
+ break;
+ }
+
+ writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);
+
+ /* initialize interrupt */
+ writel_relaxed(intmask, emc->regs + EMC_INTMASK);
+ writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);
+
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ return 0;
+}
+
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
+{
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
+
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
+
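+	/*
+	 * Select the first rate that satisfies the request; if that rate
+	 * exceeds max_rate, step back to the closest lower entry, provided
+	 * it still satisfies min_rate.
+	 */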
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
+
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ return timing->rate;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct platform_device *mc;
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
+
+ if (of_get_child_count(pdev->dev.of_node) == 0) {
+ dev_info(&pdev->dev,
+ "device-tree node doesn't have memory timings\n");
+ return -ENODEV;
+ }
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not get memory controller node\n");
+ return -ENOENT;
+ }
+
+ mc = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!mc)
+ return -ENOENT;
+
+ np = emc_find_node_by_ram_code(&pdev->dev);
+ if (!np)
+ return -EINVAL;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ emc->mc = platform_get_drvdata(mc);
+ if (!emc->mc)
+ return -EPROBE_DEFER;
+
+ init_completion(&emc->clk_handshake_complete);
+ emc->clk_nb.notifier_call = emc_clk_change_notify;
+ emc->dev = &pdev->dev;
+
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ err = emc_setup_hw(emc);
+ if (err)
+ return err;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
+ return err;
+ }
+ emc->irq = err;
+
+ err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
+ dev_name(&pdev->dev), emc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", err);
+ return err;
+ }
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
+ goto unset_cb;
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
+ err);
+ goto unset_cb;
+ }
+
+ platform_set_drvdata(pdev, emc);
+
+ return 0;
+
+unset_cb:
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+
+ return err;
+}
+
+static int tegra_emc_suspend(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+	/*
+	 * Suspending in a bad state will hang the machine. The "prepared"
+	 * flag should always be false here, unless a kernel bug caused
+	 * suspend to be entered in the wrong order.
+	 */
+ if (WARN_ON(emc->prepared) || emc->bad_state)
+ return -EINVAL;
+
+ emc->bad_state = true;
+
+ return 0;
+}
+
+static int tegra_emc_resume(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ emc_setup_hw(emc);
+ emc->bad_state = false;
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_emc_pm_ops = {
+ .suspend = tegra_emc_suspend,
+ .resume = tegra_emc_resume,
+};
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra30-emc", },
+ {},
+};
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra30-emc",
+ .of_match_table = tegra_emc_of_match,
+ .pm = &tegra_emc_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init tegra_emc_init(void)
+{
+ return platform_driver_register(&tegra_emc_driver);
+}
+subsys_initcall(tegra_emc_init);
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 14788fc2f9e8..fcdd812eed80 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -10,6 +10,27 @@
#include "mc.h"
+static const unsigned long tegra30_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_RING1_THROTTLE,
+};
+
static const struct tegra_mc_client tegra30_mc_clients[] = {
{
.id = 0x00,
@@ -931,16 +952,19 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
{ .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
};
-static const unsigned int tegra30_group_display[] = {
+static const unsigned int tegra30_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
+ TEGRA_SWGROUP_NV2,
};
static const struct tegra_smmu_group_soc tegra30_groups[] = {
{
- .name = "display",
- .swgroups = tegra30_group_display,
- .num_swgroups = ARRAY_SIZE(tegra30_group_display),
+ .name = "drm",
+ .swgroups = tegra30_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra30_group_drm),
},
};
@@ -994,6 +1018,8 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.atom_size = 16,
.client_id_mask = 0x7f,
.smmu = &tegra30_smmu_soc,
+ .emem_regs = tegra30_mc_emem_regs,
+ .num_emem_regs = ARRAY_SIZE(tegra30_mc_emem_regs),
.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM,
.reset_ops = &tegra_mc_reset_ops_common,
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index b36142df0295..0a9c5ddf2f59 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -848,7 +848,7 @@ static int jmb38x_ms_count_slots(struct pci_dev *pdev)
{
int cnt, rc = 0;
- for (cnt = 0; cnt < PCI_ROM_RESOURCE; ++cnt) {
+ for (cnt = 0; cnt < PCI_STD_NUM_BARS; ++cnt) {
if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt)))
break;
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 6e6dfd6c1871..c4b977a5dd96 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -78,6 +78,10 @@ static const struct mfd_cell cros_ec_rtc_cells[] = {
{ .name = "cros-ec-rtc", },
};
+static const struct mfd_cell cros_ec_sensorhub_cells[] = {
+ { .name = "cros-ec-sensorhub", },
+};
+
static const struct mfd_cell cros_usbpd_charger_cells[] = {
{ .name = "cros-usbpd-charger", },
{ .name = "cros-usbpd-logger", },
@@ -112,229 +116,11 @@ static const struct mfd_cell cros_ec_vbc_cells[] = {
{ .name = "cros-ec-vbc", }
};
-static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
-{
- struct cros_ec_command *msg;
- int ret;
-
- if (ec->features[0] == -1U && ec->features[1] == -1U) {
- /* features bitmap not read yet */
- msg = kzalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
- msg->insize = sizeof(ec->features);
-
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
- ret, msg->result);
- memset(ec->features, 0, sizeof(ec->features));
- } else {
- memcpy(ec->features, msg->data, sizeof(ec->features));
- }
-
- dev_dbg(ec->dev, "EC features %08x %08x\n",
- ec->features[0], ec->features[1]);
-
- kfree(msg);
- }
-
- return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
-}
-
static void cros_ec_class_release(struct device *dev)
{
kfree(to_cros_ec_dev(dev));
}
-static void cros_ec_sensors_register(struct cros_ec_dev *ec)
-{
- /*
- * Issue a command to get the number of sensor reported.
- * Build an array of sensors driver and register them all.
- */
- int ret, i, id, sensor_num;
- struct mfd_cell *sensor_cells;
- struct cros_ec_sensor_platform *sensor_platforms;
- int sensor_type[MOTIONSENSE_TYPE_MAX];
- struct ec_params_motion_sense *params;
- struct ec_response_motion_sense *resp;
- struct cros_ec_command *msg;
-
- msg = kzalloc(sizeof(struct cros_ec_command) +
- max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
- if (msg == NULL)
- return;
-
- msg->version = 2;
- msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
- msg->outsize = sizeof(*params);
- msg->insize = sizeof(*resp);
-
- params = (struct ec_params_motion_sense *)msg->data;
- params->cmd = MOTIONSENSE_CMD_DUMP;
-
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
- ret, msg->result);
- goto error;
- }
-
- resp = (struct ec_response_motion_sense *)msg->data;
- sensor_num = resp->dump.sensor_count;
- /*
- * Allocate 2 extra sensors if lid angle sensor and/or FIFO are needed.
- */
- sensor_cells = kcalloc(sensor_num + 2, sizeof(struct mfd_cell),
- GFP_KERNEL);
- if (sensor_cells == NULL)
- goto error;
-
- sensor_platforms = kcalloc(sensor_num,
- sizeof(struct cros_ec_sensor_platform),
- GFP_KERNEL);
- if (sensor_platforms == NULL)
- goto error_platforms;
-
- memset(sensor_type, 0, sizeof(sensor_type));
- id = 0;
- for (i = 0; i < sensor_num; i++) {
- params->cmd = MOTIONSENSE_CMD_INFO;
- params->info.sensor_num = i;
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
- i, ret, msg->result);
- continue;
- }
- switch (resp->info.type) {
- case MOTIONSENSE_TYPE_ACCEL:
- sensor_cells[id].name = "cros-ec-accel";
- break;
- case MOTIONSENSE_TYPE_BARO:
- sensor_cells[id].name = "cros-ec-baro";
- break;
- case MOTIONSENSE_TYPE_GYRO:
- sensor_cells[id].name = "cros-ec-gyro";
- break;
- case MOTIONSENSE_TYPE_MAG:
- sensor_cells[id].name = "cros-ec-mag";
- break;
- case MOTIONSENSE_TYPE_PROX:
- sensor_cells[id].name = "cros-ec-prox";
- break;
- case MOTIONSENSE_TYPE_LIGHT:
- sensor_cells[id].name = "cros-ec-light";
- break;
- case MOTIONSENSE_TYPE_ACTIVITY:
- sensor_cells[id].name = "cros-ec-activity";
- break;
- default:
- dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
- continue;
- }
- sensor_platforms[id].sensor_num = i;
- sensor_cells[id].id = sensor_type[resp->info.type];
- sensor_cells[id].platform_data = &sensor_platforms[id];
- sensor_cells[id].pdata_size =
- sizeof(struct cros_ec_sensor_platform);
-
- sensor_type[resp->info.type]++;
- id++;
- }
-
- if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
- ec->has_kb_wake_angle = true;
-
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
- sensor_cells[id].name = "cros-ec-ring";
- id++;
- }
- if (cros_ec_check_features(ec,
- EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
- sensor_cells[id].name = "cros-ec-lid-angle";
- id++;
- }
-
- ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
- NULL, 0, NULL);
- if (ret)
- dev_err(ec->dev, "failed to add EC sensors\n");
-
- kfree(sensor_platforms);
-error_platforms:
- kfree(sensor_cells);
-error:
- kfree(msg);
-}
-
-static struct cros_ec_sensor_platform sensor_platforms[] = {
- { .sensor_num = 0 },
- { .sensor_num = 1 }
-};
-
-static const struct mfd_cell cros_ec_accel_legacy_cells[] = {
- {
- .name = "cros-ec-accel-legacy",
- .platform_data = &sensor_platforms[0],
- .pdata_size = sizeof(struct cros_ec_sensor_platform),
- },
- {
- .name = "cros-ec-accel-legacy",
- .platform_data = &sensor_platforms[1],
- .pdata_size = sizeof(struct cros_ec_sensor_platform),
- }
-};
-
-static void cros_ec_accel_legacy_register(struct cros_ec_dev *ec)
-{
- struct cros_ec_device *ec_dev = ec->ec_dev;
- u8 status;
- int ret;
-
- /*
- * ECs that need legacy support are the main EC, directly connected to
- * the AP.
- */
- if (ec->cmd_offset != 0)
- return;
-
- /*
- * Check if EC supports direct memory reads and if EC has
- * accelerometers.
- */
- if (ec_dev->cmd_readmem) {
- ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS, 1,
- &status);
- if (ret < 0) {
- dev_warn(ec->dev, "EC direct read error.\n");
- return;
- }
-
- /* Check if EC has accelerometers. */
- if (!(status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
- dev_info(ec->dev, "EC does not have accelerometers.\n");
- return;
- }
- }
-
- /*
- * The device may still support accelerometers:
- * it would be an older ARM based device that do not suppor the
- * EC_CMD_GET_FEATURES command.
- *
- * Register 2 accelerometers, we will fail in the IIO driver if there
- * are no sensors.
- */
- ret = mfd_add_hotplug_devices(ec->dev, cros_ec_accel_legacy_cells,
- ARRAY_SIZE(cros_ec_accel_legacy_cells));
- if (ret)
- dev_err(ec_dev->dev, "failed to add EC sensors\n");
-}
-
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -390,11 +176,14 @@ static int ec_device_probe(struct platform_device *pdev)
goto failed;
/* check whether this EC is a sensor hub. */
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
- cros_ec_sensors_register(ec);
- else
- /* Workaroud for older EC firmware */
- cros_ec_accel_legacy_register(ec);
+ if (cros_ec_get_sensor_count(ec) > 0) {
+ retval = mfd_add_hotplug_devices(ec->dev,
+ cros_ec_sensorhub_cells,
+ ARRAY_SIZE(cros_ec_sensorhub_cells));
+ if (retval)
+ dev_err(ec->dev, "failed to add %s subdevice: %d\n",
+ cros_ec_sensorhub_cells->name, retval);
+ }
/*
* The following subdevices can be detected by sending the
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 6e208a060a58..a5e317073d95 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -94,7 +94,7 @@ enum pci_barno {
struct pci_endpoint_test {
struct pci_dev *pdev;
void __iomem *base;
- void __iomem *bar[6];
+ void __iomem *bar[PCI_STD_NUM_BARS];
struct completion irq_raised;
int last_irq;
int num_irqs;
@@ -687,7 +687,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!pci_endpoint_test_request_irq(test))
goto err_disable_irq;
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
if (!base) {
@@ -740,7 +740,7 @@ err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
err_iounmap:
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
@@ -771,7 +771,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index 426ad912b441..d054e2842a5f 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -96,7 +96,7 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
if (!part)
return NULL;
- if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
+ if (!gen_pool_has_addr(pool, (unsigned long)dst, size))
return NULL;
base = (unsigned long)part->base;
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index f8b624aca9cc..a27b635eb23a 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -9,6 +9,6 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung_mtd.o
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/samsung_mtd.c
index 55e5536a5850..55e5536a5850 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/samsung_mtd.c
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 0f847d510950..54646c2c2744 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -107,6 +107,7 @@ void ubi_dump_vol_info(const struct ubi_volume *vol)
pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
pr_err("\tcorrupted %d\n", vol->corrupted);
pr_err("\tupd_marker %d\n", vol->upd_marker);
+ pr_err("\tskip_check %d\n", vol->skip_check);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index c44c8470247e..426820ab9afe 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -57,18 +57,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_available(struct rb_root *root)
-{
- struct rb_node *p;
- struct ubi_wl_entry *e;
-
- ubi_rb_for_each_entry(p, e, root, u.rb)
- if (e->pnum < UBI_FM_MAX_START)
- return 1;
-
- return 0;
-}
-
/**
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
* @ubi: UBI device description object
@@ -277,8 +265,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
+
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* No luck, trigger wear leveling to produce a new anchor PEB */
+ ubi->fm_do_produce_anchor = 1;
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -294,7 +300,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
return -ENOMEM;
}
- wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
__schedule_ubi_work(ubi, wrk);
return 0;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 30621c67721a..1c7be4eb3ba6 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1540,14 +1540,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- ret = ubi_ensure_anchor_pebs(ubi);
- if (ret) {
- up_write(&ubi->fm_eba_sem);
- up_write(&ubi->work_sem);
- up_write(&ubi->fm_protect);
- return ret;
- }
-
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
@@ -1618,7 +1610,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
}
spin_lock(&ubi->wl_lock);
- tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ tmp_e = ubi->fm_anchor;
+ ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
@@ -1670,6 +1663,9 @@ out_unlock:
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
+
+ ubi_ensure_anchor_pebs(ubi);
+
return ret;
err:
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 721b6aa7936c..9688b411c930 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -491,6 +491,8 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
+ * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_do_produce_anchor: If true, produce an anchor PEB in the WL sub-system
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -599,6 +601,8 @@ struct ubi_device {
struct work_struct fm_work;
int fm_work_scheduled;
int fast_attach;
+ struct ubi_wl_entry *fm_anchor;
+ int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -789,7 +793,6 @@ struct ubi_attach_info {
* @vol_id: the volume ID on which this erasure is being performed
* @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
- * @anchor: produce a anchor PEB to by used by fastmap
*
* The @func pointer points to the worker function. If the @shutdown argument is
* not zero, the worker has to free the resources and exit immediately as the
@@ -805,7 +808,6 @@ struct ubi_work {
int vol_id;
int lnum;
int torture;
- int anchor;
};
#include "debug.h"
@@ -968,7 +970,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
-int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
+static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
#endif
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 3fcdefe2714d..5d77a38dba54 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
}
}
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best WL entry
- * such that fastmap can use the anchor PEB later. */
- if (prev_e && !ubi->fm_disabled &&
- !ubi->fm && e->pnum < UBI_FM_MAX_START)
- return prev_e;
-
return e;
}
@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
-#ifdef CONFIG_MTD_UBI_FASTMAP
- int anchor = wrk->anchor;
-#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
@@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
#ifdef CONFIG_MTD_UBI_FASTMAP
- /* Check whether we need to produce an anchor PEB */
- if (!anchor)
- anchor = !anchor_pebs_available(&ubi->free);
-
- if (anchor) {
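+	/* Produce an anchor PEB only when fastmap has requested one. */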
+ if (ubi->fm_do_produce_anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
@@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ ubi->fm_do_produce_anchor = 0;
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
@@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
goto out_cancel;
}
- wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
@@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+
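+		/*
+		 * A freshly erased PEB below UBI_FM_MAX_START can serve as
+		 * the fastmap anchor, so reserve it instead of returning it
+		 * to the free tree.
+		 */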
+ if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
+ ubi->fm_anchor = e;
+ ubi->fm_do_produce_anchor = 0;
+ } else {
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+
spin_unlock(&ubi->wl_lock);
/*
@@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (err)
goto out_free;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi_ensure_anchor_pebs(ubi);
+#endif
return 0;
out_free:
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index a9e2d669acd8..c93a53293786 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,6 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a6f2063f1475..8ed85037f021 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1858,7 +1858,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
/* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.txt)
+ * Documentation/networking/phy.rst)
*/
phy_support_asym_pause(phy);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index c40729b2c184..7fad2f24dcad 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -45,7 +45,6 @@
#define BAR_0 0
#define BAR_1 1
-#define BAR_5 5
#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 416da9619928..aca97b084003 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -977,7 +977,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
if (adapter->need_ioport) {
- for (i = BAR_1; i <= BAR_5; i++) {
+ for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index e85271b68410..681d44cc9784 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -42,7 +42,6 @@
#define BAR_0 0
#define BAR_1 1
-#define BAR_5 5
struct ixgb_adapter;
#include "ixgb_hw.h"
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 0940a0da16f2..3d8c051dd327 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -412,7 +412,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- for (i = BAR_1; i <= BAR_5; i++) {
+ for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index c754987278a9..af4ebd2951b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -329,7 +329,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *out_dev, *route_dev;
struct flowi6 fl6 = {};
struct ipv6hdr *ip6h;
- struct neighbour *n;
+ struct neighbour *n = NULL;
int ipv6_encap_size;
char *encap_header;
u8 nud_state, ttl;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index dbdb7c5ae8f1..39317cdfa6cf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -596,8 +596,8 @@ enum ionic_txq_desc_opcode {
* the @encap is set, the device will
* offload the outer header checksums using
* LCO (local checksum offload) (see
- * Documentation/networking/checksum-
- * offloads.txt for more info).
+ * Documentation/networking/checksum-offloads.rst
+ * for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
*
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 292045f4581f..8237dbc3e991 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -489,7 +489,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
}
/* Get the base address of device */
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
@@ -532,7 +532,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
if (priv->plat->stmmac_clk)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
pcim_iounmap_regions(pdev, BIT(i));
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
index 386bafe74c3f..fa8604d7b797 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -34,7 +34,7 @@ static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
return ret;
}
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pcidev, i) == 0)
continue;
ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 32ae710d4f40..1081d171e477 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -421,8 +421,6 @@ extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
* Asynchronous Interfacing
*/
-#define SERIAL_MAGIC 0x5301
-
/*
* The size of the serial xmit buffer is 1 page, or 4096 bytes
*/
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 156c2a18a239..e52b300b2f5b 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1139,6 +1139,7 @@ static const struct ntb_dev_data dev_data[] = {
static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6ec589268b9d..dfe37a525f3a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2412,16 +2412,6 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
- },
- {
- /*
- * This Kingston E8FK11.T firmware version has no interrupt
- * after resume with actions related to suspend to idle
- * https://bugzilla.kernel.org/show_bug.cgi?id=204887
- */
- .vid = 0x2646,
- .fr = "E8FK11.T",
- .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index 39bd76306033..d6b533497ce1 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -17,14 +17,18 @@
static int meson_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
- return meson_sm_call_read((u8 *)val, bytes, SM_EFUSE_READ, offset,
+ struct meson_sm_firmware *fw = context;
+
+ return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
bytes, 0, 0, 0);
}
static int meson_efuse_write(void *context, unsigned int offset,
void *val, size_t bytes)
{
- return meson_sm_call_write((u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+ struct meson_sm_firmware *fw = context;
+
+ return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
bytes, 0, 0, 0);
}
@@ -37,12 +41,25 @@ MODULE_DEVICE_TABLE(of, meson_efuse_match);
static int meson_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct meson_sm_firmware *fw;
+ struct device_node *sm_np;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
struct clk *clk;
unsigned int size;
int ret;
+ sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
+ if (!sm_np) {
+ dev_err(&pdev->dev, "no secure-monitor node\n");
+ return -ENODEV;
+ }
+
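+	/*
+	 * The secure-monitor driver may not have probed yet, so defer
+	 * until a firmware handle becomes available.
+	 */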
+ fw = meson_sm_get(sm_np);
+ of_node_put(sm_np);
+ if (!fw)
+ return -EPROBE_DEFER;
+
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -65,7 +82,7 @@ static int meson_efuse_probe(struct platform_device *pdev)
return ret;
}
- if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
+ if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
dev_err(dev, "failed to get max user");
return -EINVAL;
}
@@ -81,6 +98,7 @@ static int meson_efuse_probe(struct platform_device *pdev)
econfig->reg_read = meson_efuse_read;
econfig->reg_write = meson_efuse_write;
econfig->size = size;
+ econfig->priv = fw;
nvmem = devm_nvmem_register(&pdev->dev, econfig);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 978427a9d5e6..99c1b8058559 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include "of_private.h"
+
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
@@ -241,6 +243,7 @@ static int parser_init(struct of_pci_range_parser *parser,
parser->node = node;
parser->pna = of_n_addr_cells(node);
parser->np = parser->pna + na + ns;
+ parser->dma = !strcmp(name, "dma-ranges");
parser->range = of_get_property(node, name, &rlen);
if (parser->range == NULL)
@@ -279,7 +282,11 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
range->pci_space = be32_to_cpup(parser->range);
range->flags = of_bus_pci_get_flags(parser->range);
range->pci_addr = of_read_number(parser->range + 1, ns);
- range->cpu_addr = of_translate_address(parser->node,
+ if (parser->dma)
+ range->cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ range->cpu_addr = of_translate_address(parser->node,
parser->range + na);
range->size = of_read_number(parser->range + parser->pna + na, ns);
@@ -292,8 +299,12 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
flags = of_bus_pci_get_flags(parser->range);
pci_addr = of_read_number(parser->range + 1, ns);
- cpu_addr = of_translate_address(parser->node,
- parser->range + na);
+ if (parser->dma)
+ cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
size = of_read_number(parser->range + parser->pna + na, ns);
if (flags != range->flags)
@@ -517,9 +528,13 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
*
* As far as we know, this damage only exists on Apple machines, so
* This code is only enabled on powerpc. --gcl
+ *
+ * This quirk also applies to 'dma-ranges', which frequently exists in
+ * child nodes whose parent nodes lack 'dma-ranges'. --RobH
*/
ranges = of_get_property(parent, rprop, &rlen);
- if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
+ if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
+ strcmp(rprop, "dma-ranges")) {
pr_debug("no ranges; cannot translate\n");
return 1;
}
@@ -695,6 +710,16 @@ static struct device_node *__of_get_dma_parent(const struct device_node *np)
return of_node_get(args.np);
}
+static struct device_node *of_get_next_dma_parent(struct device_node *np)
+{
+ struct device_node *parent;
+
+ parent = __of_get_dma_parent(np);
+ of_node_put(np);
+
+ return parent;
+}
+
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
struct device_node *host;
@@ -826,25 +851,6 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
-struct device_node *of_find_matching_node_by_address(struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address)
-{
- struct device_node *dn = of_find_matching_node(from, matches);
- struct resource res;
-
- while (dn) {
- if (!of_address_to_resource(dn, 0, &res) &&
- res.start == base_address)
- return dn;
-
- dn = of_find_matching_node(dn, matches);
- }
-
- return NULL;
-}
-
-
/**
* of_iomap - Maps the memory mapped IO for a given device_node
* @device: the device whose io range will be mapped
@@ -924,47 +930,39 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
const __be32 *ranges = NULL;
int len, naddr, nsize, pna;
int ret = 0;
+ bool found_dma_ranges = false;
u64 dmaaddr;
- if (!node)
- return -EINVAL;
-
- while (1) {
- struct device_node *parent;
-
- naddr = of_n_addr_cells(node);
- nsize = of_n_size_cells(node);
-
- parent = __of_get_dma_parent(node);
- of_node_put(node);
-
- node = parent;
- if (!node)
- break;
-
+ while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
/* Ignore empty ranges, they imply no translation required */
if (ranges && len > 0)
break;
- /*
- * At least empty ranges has to be defined for parent node if
- * DMA is supported
- */
- if (!ranges)
- break;
+ /* Once we find 'dma-ranges', then a missing one is an error */
+ if (found_dma_ranges && !ranges) {
+ ret = -ENODEV;
+ goto out;
+ }
+ found_dma_ranges = true;
+
+ node = of_get_next_dma_parent(node);
}
- if (!ranges) {
+ if (!node || !ranges) {
pr_debug("no dma-ranges found for node(%pOF)\n", np);
ret = -ENODEV;
goto out;
}
- len /= sizeof(u32);
-
+ naddr = of_bus_n_addr_cells(node);
+ nsize = of_bus_n_size_cells(node);
pna = of_n_addr_cells(node);
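+	/* The property must hold a whole number of (dma, cpu, size) entries. */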
+ if ((len / sizeof(__be32)) % (pna + naddr + nsize)) {
+ ret = -EINVAL;
+ goto out;
+ }
/* dma-ranges format:
* DMA addr : naddr cells
@@ -972,10 +970,10 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
* size : nsize cells
*/
dmaaddr = of_read_number(ranges, naddr);
- *paddr = of_translate_dma_address(np, ranges);
+ *paddr = of_translate_dma_address(node, ranges + naddr);
if (*paddr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%pad) to CPU address failed node(%pOF)\n",
- dma_addr, np);
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ dmaaddr, np);
ret = -EINVAL;
goto out;
}
@@ -991,7 +989,6 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(of_dma_get_range);
/**
* of_dma_is_coherent - Check if device is coherent
@@ -1009,7 +1006,7 @@ bool of_dma_is_coherent(struct device_node *np)
of_node_put(node);
return true;
}
- node = of_get_next_parent(node);
+ node = of_get_next_dma_parent(node);
}
of_node_put(node);
return false;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 1d667eb730e1..db7fbc0c0893 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -86,34 +86,46 @@ static bool __of_node_is_type(const struct device_node *np, const char *type)
return np && match && type && !strcmp(match, type);
}
-int of_n_addr_cells(struct device_node *np)
+int of_bus_n_addr_cells(struct device_node *np)
{
u32 cells;
- do {
- if (np->parent)
- np = np->parent;
+ for (; np; np = np->parent)
if (!of_property_read_u32(np, "#address-cells", &cells))
return cells;
- } while (np->parent);
+
/* No #address-cells property for the root node */
return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
+
+int of_n_addr_cells(struct device_node *np)
+{
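+	/* #address-cells describes a node's children, so start at the parent. */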
+ if (np->parent)
+ np = np->parent;
+
+ return of_bus_n_addr_cells(np);
+}
EXPORT_SYMBOL(of_n_addr_cells);
-int of_n_size_cells(struct device_node *np)
+int of_bus_n_size_cells(struct device_node *np)
{
u32 cells;
- do {
- if (np->parent)
- np = np->parent;
+ for (; np; np = np->parent)
if (!of_property_read_u32(np, "#size-cells", &cells))
return cells;
- } while (np->parent);
+
/* No #size-cells property for the root node */
return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
+
+int of_n_size_cells(struct device_node *np)
+{
+ if (np->parent)
+ np = np->parent;
+
+ return of_bus_n_size_cells(np);
+}
EXPORT_SYMBOL(of_n_size_cells);
#ifdef CONFIG_NUMA
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f1c23aad951e..2cdf64d2456f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -947,8 +947,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- of_setup_earlycon(match, offset, options);
- return 0;
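+		/* Try the next match if this earlycon fails to set up. */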
+ if (of_setup_earlycon(match, offset, options) == 0)
+ return 0;
}
return -ENODEV;
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 24786818e32e..66294d29942a 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -158,4 +158,18 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
#define for_each_transaction_entry_reverse(_oft, _te) \
list_for_each_entry_reverse(_te, &(_oft)->te_list, node)
+extern int of_bus_n_addr_cells(struct device_node *np);
+extern int of_bus_n_size_cells(struct device_node *np);
+
+#ifdef CONFIG_OF_ADDRESS
+extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size);
+#else
+static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index c423e94baf0f..9617b7df7c4d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -305,7 +305,6 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
{
struct property *new_prop = NULL, *prop;
int ret = 0;
- bool check_for_non_overlay_node = false;
if (target->in_livetree)
if (!of_prop_cmp(overlay_prop->name, "name") ||
@@ -318,6 +317,25 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
else
prop = NULL;
+ if (prop) {
+ if (!of_prop_cmp(prop->name, "#address-cells")) {
+ if (!of_prop_val_eq(prop, overlay_prop)) {
+ pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
+ target->np);
+ ret = -EINVAL;
+ }
+ return ret;
+
+ } else if (!of_prop_cmp(prop->name, "#size-cells")) {
+ if (!of_prop_val_eq(prop, overlay_prop)) {
+ pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
+ target->np);
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+ }
+
if (is_symbols_prop) {
if (prop)
return -EINVAL;
@@ -330,33 +348,18 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
return -ENOMEM;
if (!prop) {
- check_for_non_overlay_node = true;
if (!target->in_livetree) {
new_prop->next = target->np->deadprops;
target->np->deadprops = new_prop;
}
ret = of_changeset_add_property(&ovcs->cset, target->np,
new_prop);
- } else if (!of_prop_cmp(prop->name, "#address-cells")) {
- if (!of_prop_val_eq(prop, new_prop)) {
- pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
- target->np);
- ret = -EINVAL;
- }
- } else if (!of_prop_cmp(prop->name, "#size-cells")) {
- if (!of_prop_val_eq(prop, new_prop)) {
- pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
- target->np);
- ret = -EINVAL;
- }
} else {
- check_for_non_overlay_node = true;
ret = of_changeset_update_property(&ovcs->cset, target->np,
new_prop);
}
- if (check_for_non_overlay_node &&
- !of_node_check_flag(target->np, OF_OVERLAY))
+ if (!of_node_check_flag(target->np, OF_OVERLAY))
pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
target->np, new_prop->name);
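
[Editorial note] Net effect of the reordering in add_changeset_property(): an overlay may still carry #address-cells or #size-cells provided the values match the live tree (the early return 0 simply adds no changeset entry), while a mismatch now fails before new_prop is even allocated. Roughly, with is_cells_prop() as a made-up shorthand for the two name comparisons:

	if (prop && is_cells_prop(prop->name))	/* hypothetical helper */
		return of_prop_val_eq(prop, overlay_prop) ? 0 : -EINVAL;
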
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 477966d2421a..e851c57a15b0 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -165,7 +165,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u64_index);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -335,7 +335,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u64);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
index 55fe0ee20109..a85b5e1c381a 100644
--- a/drivers/of/unittest-data/testcases.dts
+++ b/drivers/of/unittest-data/testcases.dts
@@ -15,5 +15,6 @@
#include "tests-phandle.dtsi"
#include "tests-interrupts.dtsi"
#include "tests-match.dtsi"
+#include "tests-address.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
diff --git a/drivers/of/unittest-data/tests-address.dtsi b/drivers/of/unittest-data/tests-address.dtsi
new file mode 100644
index 000000000000..3fe5d3987beb
--- /dev/null
+++ b/drivers/of/unittest-data/tests-address.dtsi
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ testcase-data {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ address-tests {
+ #address-cells = <1>;
+ #size-cells = <1>;
+			/* ranges is here to make sure we don't use it
+			 * for dma-ranges translation */
+ ranges = <0x70000000 0x70000000 0x40000000>,
+ <0x00000000 0xd0000000 0x20000000>;
+ dma-ranges = <0x0 0x20000000 0x40000000>;
+
+ device@70000000 {
+ reg = <0x70000000 0x1000>;
+ };
+
+ bus@80000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x80000000 0x100000>;
+ dma-ranges = <0x10000000 0x0 0x40000000>;
+
+ device@1000 {
+ reg = <0x1000 0x1000>;
+ };
+ };
+
+ pci@90000000 {
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0x90000000 0x1000>;
+ ranges = <0x42000000 0x0 0x40000000 0x40000000 0x0 0x10000000>;
+ dma-ranges = <0x42000000 0x0 0x80000000 0x00000000 0x0 0x10000000>,
+ <0x42000000 0x0 0xc0000000 0x20000000 0x0 0x10000000>;
+ };
+
+ };
+ };
+};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 92e895d86458..68b87587b2ef 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -12,6 +12,7 @@
#include <linux/hashtable.h>
#include <linux/libfdt.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -779,6 +780,95 @@ static void __init of_unittest_changeset(void)
#endif
}
+static void __init of_unittest_dma_ranges_one(const char *path,
+ u64 expect_dma_addr, u64 expect_paddr, u64 expect_size)
+{
+ struct device_node *np;
+ u64 dma_addr, paddr, size;
+ int rc;
+
+ np = of_find_node_by_path(path);
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ rc = of_dma_get_range(np, &dma_addr, &paddr, &size);
+
+ unittest(!rc, "of_dma_get_range failed on node %pOF rc=%i\n", np, rc);
+ if (!rc) {
+ unittest(size == expect_size,
+ "of_dma_get_range wrong size on node %pOF size=%llx\n", np, size);
+ unittest(paddr == expect_paddr,
+			 "of_dma_get_range wrong phys addr (%llx) on node %pOF\n", paddr, np);
+ unittest(dma_addr == expect_dma_addr,
+			 "of_dma_get_range wrong DMA addr (%llx) on node %pOF\n", dma_addr, np);
+ }
+ of_node_put(np);
+}
+
+static void __init of_unittest_parse_dma_ranges(void)
+{
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
+ 0x0, 0x20000000, 0x40000000);
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
+ 0x10000000, 0x20000000, 0x40000000);
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
+ 0x80000000, 0x20000000, 0x10000000);
+}
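
[Editorial note] To sanity-check the expected values: dma-ranges entries are (child-bus address, parent-bus address, length) triplets, and nested buses compose. For device@1000 the walk crosses two levels; a small sketch of the arithmetic (illustration only, not kernel code), using the windows from tests-address.dtsi above:

	/*
	 * bus@80000000:  dma-ranges = <0x10000000 0x0        0x40000000>
	 * address-tests: dma-ranges = <0x0        0x20000000 0x40000000>
	 */
	static u64 dma_to_cpu(u64 dma)
	{
		u64 parent = dma - 0x10000000 + 0x0;	/* bus@80000000 window  */
		return parent - 0x0 + 0x20000000;	/* address-tests window */
	}

	/* dma_to_cpu(0x10000000) == 0x20000000, matching the second test case. */
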
+
+static void __init of_unittest_pci_dma_ranges(void)
+{
+ struct device_node *np;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int i = 0;
+
+ if (!IS_ENABLED(CONFIG_PCI))
+ return;
+
+ np = of_find_node_by_path("/testcase-data/address-tests/pci@90000000");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ pr_err("missing dma-ranges property\n");
+ return;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ if (!i) {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x20000000,
+				 "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF\n",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0x80000000,
+				 "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF\n",
+ range.pci_addr, np);
+ } else {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x40000000,
+				 "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF\n",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0xc0000000,
+				 "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF\n",
+ range.pci_addr, np);
+ }
+ i++;
+ }
+
+ of_node_put(np);
+}
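
[Editorial note] The PCI node uses the usual 3-address-cell / 2-size-cell encoding, so each dma-ranges entry is (flags, PCI-addr-hi, PCI-addr-lo, parent-addr, size-hi, size-lo), with flags 0x42000000 denoting prefetchable 32-bit memory space. Decoding the first entry by hand:

	/*
	 * <0x42000000  0x0 0x80000000  0x00000000  0x0 0x10000000>
	 *   flags      PCI address     parent      size
	 *
	 * PCI 0x80000000 maps to parent address 0x0; the enclosing
	 * address-tests dma-ranges <0x0 0x20000000 0x40000000> then
	 * shifts that to CPU 0x20000000, exactly the i == 0
	 * expectations in the loop above.
	 */
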
+
static void __init of_unittest_parse_interrupts(void)
{
struct device_node *np;
@@ -1146,8 +1236,10 @@ static void attach_node_and_children(struct device_node *np)
full_name = kasprintf(GFP_KERNEL, "%pOF", np);
if (!strcmp(full_name, "/__local_fixups__") ||
- !strcmp(full_name, "/__fixups__"))
+ !strcmp(full_name, "/__fixups__")) {
+ kfree(full_name);
return;
+ }
dup = of_find_node_by_path(full_name);
kfree(full_name);
@@ -2555,6 +2647,8 @@ static int __init of_unittest(void)
of_unittest_changeset();
of_unittest_parse_interrupts();
of_unittest_parse_interrupts_extended();
+ of_unittest_parse_dma_ranges();
+ of_unittest_pci_dma_ranges();
of_unittest_match_node();
of_unittest_platform_populate();
of_unittest_overlay();
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index a304f5ea11b9..4bef5c2bae9f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,7 +52,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARC || ARM || ARM64 || X86 || RISCV
+ def_bool y
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
@@ -106,14 +106,14 @@ config PCI_PF_STUB
When in doubt, say N.
config XEN_PCIDEV_FRONTEND
- tristate "Xen PCI Frontend"
- depends on X86 && XEN
- select PCI_XEN
+ tristate "Xen PCI Frontend"
+ depends on X86 && XEN
+ select PCI_XEN
select XEN_XENBUS_FRONTEND
- default y
- help
- The PCI device frontend driver allows the kernel to import arbitrary
- PCI devices from a PCI backend to support PCI driver domains.
+ default y
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
config PCI_ATS
bool
@@ -180,12 +180,12 @@ config PCI_LABEL
select NLS
config PCI_HYPERV
- tristate "Hyper-V PCI Frontend"
- depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ tristate "Hyper-V PCI Frontend"
+ depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
select PCI_HYPERV_INTERFACE
- help
- The PCI device frontend driver allows the kernel to import arbitrary
- PCI devices from a PCI backend to support PCI driver domains.
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 28cdd8c0213a..522d2b974e91 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
setup-bus.o vc.o mmap.o setup-irq.o
+obj-$(CONFIG_PCI) += pcie/
+
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -15,7 +17,6 @@ endif
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
-obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2fccb5762c76..79c4a2ef269a 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -355,7 +355,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
-static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index e18499243f84..982b46f0a54d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -60,8 +60,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
pdev = pci_physfn(dev);
if (pdev->ats_stu != ps)
return -EINVAL;
-
- atomic_inc(&pdev->ats_ref_cnt); /* count enabled VFs */
} else {
dev->ats_stu = ps;
ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
@@ -71,7 +69,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
dev->ats_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_ats);
/**
* pci_disable_ats - disable the ATS capability
@@ -79,27 +76,17 @@ EXPORT_SYMBOL_GPL(pci_enable_ats);
*/
void pci_disable_ats(struct pci_dev *dev)
{
- struct pci_dev *pdev;
u16 ctrl;
if (WARN_ON(!dev->ats_enabled))
return;
- if (atomic_read(&dev->ats_ref_cnt))
- return; /* VFs still enabled */
-
- if (dev->is_virtfn) {
- pdev = pci_physfn(dev);
- atomic_dec(&pdev->ats_ref_cnt);
- }
-
pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
ctrl &= ~PCI_ATS_CTRL_ENABLE;
pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
dev->ats_enabled = 0;
}
-EXPORT_SYMBOL_GPL(pci_disable_ats);
void pci_restore_ats_state(struct pci_dev *dev)
{
@@ -113,7 +100,6 @@ void pci_restore_ats_state(struct pci_dev *dev)
ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
}
-EXPORT_SYMBOL_GPL(pci_restore_ats_state);
/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
@@ -140,7 +126,6 @@ int pci_ats_queue_depth(struct pci_dev *dev)
pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}
-EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
/**
* pci_ats_page_aligned - Return Page Aligned Request bit status.
@@ -167,9 +152,22 @@ int pci_ats_page_aligned(struct pci_dev *pdev)
return 0;
}
-EXPORT_SYMBOL_GPL(pci_ats_page_aligned);
#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *pdev)
+{
+ u16 status;
+
+ pdev->pri_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+
+ if (!pdev->pri_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pri_cap + PCI_PRI_STATUS, &status);
+ if (status & PCI_PRI_STATUS_PASID)
+ pdev->pasid_required = 1;
+}
+
/**
* pci_enable_pri - Enable PRI capability
* @ pdev: PCI device structure
@@ -180,32 +178,41 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
{
u16 control, status;
u32 max_requests;
- int pos;
+ int pri = pdev->pri_cap;
+
+ /*
+ * VFs must not implement the PRI Capability. If their PF
+ * implements PRI, it is shared by the VFs, so if the PF PRI is
+ * enabled, it is also enabled for the VF.
+ */
+ if (pdev->is_virtfn) {
+ if (pci_physfn(pdev)->pri_enabled)
+ return 0;
+ return -EINVAL;
+ }
if (WARN_ON(pdev->pri_enabled))
return -EBUSY;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ pci_read_config_word(pdev, pri + PCI_PRI_STATUS, &status);
if (!(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
- pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
+ pci_read_config_dword(pdev, pri + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
pdev->pri_reqs_alloc = reqs;
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
control = PCI_PRI_CTRL_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
pdev->pri_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_pri);
/**
* pci_disable_pri - Disable PRI capability
@@ -216,18 +223,21 @@ EXPORT_SYMBOL_GPL(pci_enable_pri);
void pci_disable_pri(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pri = pdev->pri_cap;
+
+ /* VFs share the PF PRI */
+ if (pdev->is_virtfn)
+ return;
if (WARN_ON(!pdev->pri_enabled))
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pri + PCI_PRI_CTRL, &control);
control &= ~PCI_PRI_CTRL_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
pdev->pri_enabled = 0;
}
@@ -241,19 +251,20 @@ void pci_restore_pri_state(struct pci_dev *pdev)
{
u16 control = PCI_PRI_CTRL_ENABLE;
u32 reqs = pdev->pri_reqs_alloc;
- int pos;
+ int pri = pdev->pri_cap;
+
+ if (pdev->is_virtfn)
+ return;
if (!pdev->pri_enabled)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return;
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
}
-EXPORT_SYMBOL_GPL(pci_restore_pri_state);
/**
* pci_reset_pri - Resets device's PRI state
@@ -265,24 +276,45 @@ EXPORT_SYMBOL_GPL(pci_restore_pri_state);
int pci_reset_pri(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pri = pdev->pri_cap;
+
+ if (pdev->is_virtfn)
+ return 0;
if (WARN_ON(pdev->pri_enabled))
return -EBUSY;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return -EINVAL;
control = PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
return 0;
}
-EXPORT_SYMBOL_GPL(pci_reset_pri);
+
+/**
+ * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
+ * status.
+ * @pdev: PCI device structure
+ *
+ * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
+ */
+int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ return pdev->pasid_required;
+}
#endif /* CONFIG_PCI_PRI */
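
[Editorial note] With the PRI capability offset now cached in pdev->pri_cap at enumeration time, a consumer no longer pays a config-space capability walk per call, and VFs transparently defer to their PF. A sketch of how an IOMMU-side user might drive the reworked API; the helper name and the queue depth of 32 are illustrative, not from this patch:

	static int example_enable_pri(struct pci_dev *pdev)
	{
		int ret;

		ret = pci_reset_pri(pdev);	/* returns 0 immediately for a VF */
		if (ret)
			return ret;

		/* For a VF this succeeds only if the PF already enabled PRI. */
		ret = pci_enable_pri(pdev, 32);
		if (ret)
			return ret;

		if (pci_prg_resp_pasid_required(pdev))
			pci_info(pdev, "PRG responses must carry a PASID\n");

		return 0;
	}
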
#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *pdev)
+{
+ pdev->pasid_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+}
+
/**
* pci_enable_pasid - Enable the PASID capability
* @pdev: PCI device structure
@@ -295,7 +327,17 @@ EXPORT_SYMBOL_GPL(pci_reset_pri);
int pci_enable_pasid(struct pci_dev *pdev, int features)
{
u16 control, supported;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ /*
+ * VFs must not implement the PASID Capability, but if a PF
+ * supports PASID, its VFs share the PF PASID configuration.
+ */
+ if (pdev->is_virtfn) {
+ if (pci_physfn(pdev)->pasid_enabled)
+ return 0;
+ return -EINVAL;
+ }
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
@@ -303,11 +345,10 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (!pdev->eetlp_prefix_path)
return -EINVAL;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
@@ -317,13 +358,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
control = PCI_PASID_CTRL_ENABLE | features;
pdev->pasid_features = features;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
pdev->pasid_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_pasid);
/**
* pci_disable_pasid - Disable the PASID capability
@@ -332,20 +372,22 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
void pci_disable_pasid(struct pci_dev *pdev)
{
u16 control = 0;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ /* VFs share the PF PASID configuration */
+ if (pdev->is_virtfn)
+ return;
if (WARN_ON(!pdev->pasid_enabled))
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
pdev->pasid_enabled = 0;
}
-EXPORT_SYMBOL_GPL(pci_disable_pasid);
/**
* pci_restore_pasid_state - Restore PASID capabilities
@@ -354,19 +396,20 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
void pci_restore_pasid_state(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ if (pdev->is_virtfn)
+ return;
if (!pdev->pasid_enabled)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return;
control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
}
-EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
/**
* pci_pasid_features - Check which PASID features are supported
@@ -381,49 +424,20 @@ EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
- int pos;
+ int pasid = pdev->pasid_cap;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
return supported;
}
-EXPORT_SYMBOL_GPL(pci_pasid_features);
-
-/**
- * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
- * status.
- * @pdev: PCI device structure
- *
- * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
- *
- * Even though the PRG response PASID status is read from PRI Status
- * Register, since this API will mainly be used by PASID users, this
- * function is defined within #ifdef CONFIG_PCI_PASID instead of
- * CONFIG_PCI_PRI.
- */
-int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
- u16 status;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return 0;
-
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- if (status & PCI_PRI_STATUS_PASID)
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
@@ -437,17 +451,18 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
int pci_max_pasids(struct pci_dev *pdev)
{
u16 supported;
- int pos;
+ int pasid = pdev->pasid_cap;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
return (1 << supported);
}
-EXPORT_SYMBOL_GPL(pci_max_pasids);
#endif /* CONFIG_PCI_PASID */
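
[Editorial note] A worked example for pci_max_pasids(): the Max PASID Width field lives in bits 12:8 of the PASID capability register, and the function returns 2^width. Assuming an illustrative capability value of 0x0514:

	u16 cap = 0x0514;			/* made-up register value */
	int width = (cap & PASID_NUMBER_MASK)	/* 0x0500                 */
			>> PASID_NUMBER_SHIFT;	/* 5                      */
	int max_pasids = 1 << width;		/* 32 addressable PASIDs  */
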
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 70e078238899..c77069c8ee5d 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -22,34 +22,6 @@ config PCI_AARDVARK
	  controller is part of the South Bridge of the Marvell Armada
3700 SoC.
-menu "Cadence PCIe controllers support"
-
-config PCIE_CADENCE
- bool
-
-config PCIE_CADENCE_HOST
- bool "Cadence PCIe host controller"
- depends on OF
- depends on PCI
- select IRQ_DOMAIN
- select PCIE_CADENCE
- help
- Say Y here if you want to support the Cadence PCIe controller in host
- mode. This PCIe controller may be embedded into many different vendors
- SoCs.
-
-config PCIE_CADENCE_EP
- bool "Cadence PCIe endpoint controller"
- depends on OF
- depends on PCI_ENDPOINT
- select PCIE_CADENCE
- help
- Say Y here if you want to support the Cadence PCIe controller in
- endpoint mode. This PCIe controller may be embedded into many
- different vendors SoCs.
-
-endmenu
-
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
depends on ARCH_ZYNQMP || COMPILE_TEST
@@ -135,7 +107,7 @@ config PCI_V3_SEMI
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
- depends on ARCH_VERSATILE
+ depends on ARCH_VERSATILE || COMPILE_TEST
config PCIE_IPROC
tristate
@@ -289,4 +261,5 @@ config PCI_HYPERV_INTERFACE
have a common interface with the Hyper-V PCI frontend driver.
source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/cadence/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index a2a22c9d91af..3d4f597f15ce 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
new file mode 100644
index 000000000000..b76b3cf55ce5
--- /dev/null
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Cadence PCIe controllers support"
+ depends on PCI
+
+config PCIE_CADENCE
+ bool
+
+config PCIE_CADENCE_HOST
+ bool
+ depends on OF
+ select IRQ_DOMAIN
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_EP
+ bool
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+ bool
+
+config PCIE_CADENCE_PLAT_HOST
+ bool "Cadence PCIe platform host controller"
+ depends on OF
+ select PCIE_CADENCE_HOST
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ host mode. This PCIe controller may be embedded into many different
+	  vendors' SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+ bool "Cadence PCIe platform endpoint controller"
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE_EP
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ endpoint mode. This PCIe controller may be embedded into many
+	  different vendors' SoCs.
+
+endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
new file mode 100644
index 000000000000..232a3f20876a
--- /dev/null
+++ b/drivers/pci/controller/cadence/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index def7820cb824..1c173dad67d1 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -17,35 +17,6 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-/**
- * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
- * @pcie: Cadence PCIe controller
- * @max_regions: maximum number of regions supported by hardware
- * @ob_region_map: bitmask of mapped outbound regions
- * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
- * dedicated outbound regions is mapped.
- * @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
- * IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
- * dedicated outbound region.
- * @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
- */
-struct cdns_pcie_ep {
- struct cdns_pcie pcie;
- u32 max_regions;
- unsigned long ob_region_map;
- phys_addr_t *ob_addr;
- phys_addr_t irq_phys_addr;
- void __iomem *irq_cpu_addr;
- u64 irq_pci_addr;
- u8 irq_pci_fn;
- u8 irq_pending;
-};
-
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
struct pci_epf_header *hdr)
{
@@ -424,28 +395,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
-static const struct of_device_id cdns_pcie_ep_of_match[] = {
- { .compatible = "cdns,cdns-pcie-ep" },
-
- { },
-};
-static int cdns_pcie_ep_probe(struct platform_device *pdev)
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = ep->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- struct cdns_pcie_ep *ep;
- struct cdns_pcie *pcie;
- struct pci_epc *epc;
+ struct cdns_pcie *pcie = &ep->pcie;
struct resource *res;
+ struct pci_epc *epc;
int ret;
- int phy_count;
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
- pcie = &ep->pcie;
pcie->is_rc = false;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
@@ -474,19 +434,6 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
if (!ep->ob_addr)
return -ENOMEM;
- ret = cdns_pcie_init_phy(dev, pcie);
- if (ret) {
- dev_err(dev, "failed to init phy\n");
- return ret;
- }
- platform_set_drvdata(pdev, pcie);
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- goto err_get_sync;
- }
-
/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
@@ -528,38 +475,5 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
err_init:
pm_runtime_put_sync(dev);
- err_get_sync:
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
- phy_count = pcie->phy_count;
- while (phy_count--)
- device_link_del(pcie->link[phy_count]);
-
return ret;
}
-
-static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct cdns_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_put_sync(dev);
- if (ret < 0)
- dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
- pm_runtime_disable(dev);
-
- cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_ep_driver = {
- .driver = {
- .name = "cdns-pcie-ep",
- .of_match_table = cdns_pcie_ep_of_match,
- .pm = &cdns_pcie_pm_ops,
- },
- .probe = cdns_pcie_ep_probe,
- .shutdown = cdns_pcie_ep_shutdown,
-};
-builtin_platform_driver(cdns_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 97e251090b4f..9b1c3966414b 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -11,33 +11,6 @@
#include "pcie-cadence.h"
-/**
- * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
- * @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
- * @cfg_res: start/end offsets in the physical system memory to map PCI
- * configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
- * @cfg_base: IO mapped window to access the PCI configuration space of a
- * single function at a time
- * @max_regions: maximum number of regions supported by the hardware
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- * translation (nbits sets into the "no BAR match" register)
- * @vendor_id: PCI vendor ID
- * @device_id: PCI device ID
- */
-struct cdns_pcie_rc {
- struct cdns_pcie pcie;
- struct device *dev;
- struct resource *cfg_res;
- struct resource *bus_range;
- void __iomem *cfg_base;
- u32 max_regions;
- u32 no_bar_nbits;
- u16 vendor_id;
- u16 device_id;
-};
-
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -92,11 +65,6 @@ static struct pci_ops cdns_pcie_host_ops = {
.write = pci_generic_config_write,
};
-static const struct of_device_id cdns_pcie_host_of_match[] = {
- { .compatible = "cdns,cdns-pcie-host" },
-
- { },
-};
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
@@ -136,10 +104,10 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
- struct resource *cfg_res = rc->cfg_res;
struct resource *mem_res = pcie->mem_res;
struct resource *bus_range = rc->bus_range;
- struct device *dev = rc->dev;
+ struct resource *cfg_res = rc->cfg_res;
+ struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
struct of_pci_range_parser parser;
struct of_pci_range range;
@@ -211,7 +179,7 @@ static int cdns_pcie_host_init(struct device *dev,
int err;
/* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return err;
@@ -233,25 +201,21 @@ static int cdns_pcie_host_init(struct device *dev,
return err;
}
-static int cdns_pcie_host_probe(struct platform_device *pdev)
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = rc->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct list_head resources;
- struct cdns_pcie_rc *rc;
struct cdns_pcie *pcie;
struct resource *res;
int ret;
- int phy_count;
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ bridge = pci_host_bridge_from_priv(rc);
if (!bridge)
return -ENOMEM;
- rc = pci_host_bridge_priv(bridge);
- rc->dev = dev;
-
pcie = &rc->pcie;
pcie->is_rc = true;
@@ -287,21 +251,8 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
dev_err(dev, "missing \"mem\"\n");
return -EINVAL;
}
- pcie->mem_res = res;
- ret = cdns_pcie_init_phy(dev, pcie);
- if (ret) {
- dev_err(dev, "failed to init phy\n");
- return ret;
- }
- platform_set_drvdata(pdev, pcie);
-
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- goto err_get_sync;
- }
+ pcie->mem_res = res;
ret = cdns_pcie_host_init(dev, &resources, rc);
if (ret)
@@ -326,37 +277,5 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
err_init:
pm_runtime_put_sync(dev);
- err_get_sync:
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
- phy_count = pcie->phy_count;
- while (phy_count--)
- device_link_del(pcie->link[phy_count]);
-
return ret;
}
-
-static void cdns_pcie_shutdown(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct cdns_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_put_sync(dev);
- if (ret < 0)
- dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_host_driver = {
- .driver = {
- .name = "cdns-pcie-host",
- .of_match_table = cdns_pcie_host_of_match,
- .pm = &cdns_pcie_pm_ops,
- },
- .probe = cdns_pcie_host_probe,
- .shutdown = cdns_pcie_shutdown,
-};
-builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
new file mode 100644
index 000000000000..f5c6bf6dfcb8
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe platform driver.
+ *
+ * Copyright (c) 2019, Cadence Design Systems
+ * Author: Tom Joseph <tjoseph@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include "pcie-cadence.h"
+
+/**
+ * struct cdns_plat_pcie - private data for this PCIe platform driver
+ * @pcie: Cadence PCIe controller
+ * @is_rc: true when the PCIe controller operates in Root Complex mode,
+ *	   false when it operates in Endpoint mode.
+ */
+struct cdns_plat_pcie {
+ struct cdns_pcie *pcie;
+ bool is_rc;
+};
+
+struct cdns_plat_pcie_of_data {
+ bool is_rc;
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[];
+
+static int cdns_plat_pcie_probe(struct platform_device *pdev)
+{
+ const struct cdns_plat_pcie_of_data *data;
+ struct cdns_plat_pcie *cdns_plat_pcie;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+ int phy_count;
+ bool is_rc;
+ int ret;
+
+ match = of_match_device(cdns_plat_pcie_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_plat_pcie_of_data *)match->data;
+ is_rc = data->is_rc;
+
+	pr_debug("Started %s with is_rc: %d\n", __func__, is_rc);
+ cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
+ if (!cdns_plat_pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cdns_plat_pcie);
+ if (is_rc) {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ rc = pci_host_bridge_priv(bridge);
+ rc->pcie.dev = dev;
+ cdns_plat_pcie->pcie = &rc->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret)
+ goto err_init;
+ } else {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pcie.dev = dev;
+ cdns_plat_pcie->pcie = &ep->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_ep_setup(ep);
+ if (ret)
+ goto err_init;
+	}
+
+	return 0;
+
+ err_init:
+ pm_runtime_put_sync(dev);
+
+ err_get_sync:
+ pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
+ phy_count = cdns_plat_pcie->pcie->phy_count;
+ while (phy_count--)
+ device_link_del(cdns_plat_pcie->pcie->link[phy_count]);
+
+	return ret;
+}
+
+static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cdns_plat_pcie *cdns_plat_pcie = dev_get_drvdata(dev);
+	struct cdns_pcie *pcie = cdns_plat_pcie->pcie;
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+
+ cdns_pcie_disable_phy(pcie);
+}
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
+ .is_rc = true,
+};
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = {
+ .is_rc = false,
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[] = {
+ {
+ .compatible = "cdns,cdns-pcie-host",
+ .data = &cdns_plat_pcie_host_of_data,
+ },
+ {
+ .compatible = "cdns,cdns-pcie-ep",
+ .data = &cdns_plat_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver cdns_plat_pcie_driver = {
+ .driver = {
+ .name = "cdns-pcie",
+ .of_match_table = cdns_plat_pcie_of_match,
+ .pm = &cdns_pcie_pm_ops,
+ },
+ .probe = cdns_plat_pcie_probe,
+ .shutdown = cdns_plat_pcie_shutdown,
+};
+builtin_platform_driver(cdns_plat_pcie_driver);
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index cd795f6fc1e2..cd795f6fc1e2 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index ae6bf2a2b3d3..a2b28b912ca4 100644
--- a/drivers/pci/controller/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
@@ -190,6 +190,8 @@ enum cdns_pcie_rp_bar {
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+struct cdns_pcie;
+
enum cdns_pcie_msg_code {
MSG_CODE_ASSERT_INTA = 0x20,
MSG_CODE_ASSERT_INTB = 0x21,
@@ -231,13 +233,71 @@ enum cdns_pcie_msg_routing {
struct cdns_pcie {
void __iomem *reg_base;
struct resource *mem_res;
+ struct device *dev;
bool is_rc;
u8 bus;
int phy_count;
struct phy **phy;
struct device_link **link;
+ const struct cdns_pcie_common_ops *ops;
+};
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ * configuration space accesses
+ * @bus_range: first/last buses behind the PCIe host controller
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ * single function at a time
+ * @max_regions: maximum number of regions supported by the hardware
+ * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
+ * translation (nbits sets into the "no BAR match" register)
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ */
+struct cdns_pcie_rc {
+ struct cdns_pcie pcie;
+ struct resource *cfg_res;
+ struct resource *bus_range;
+ void __iomem *cfg_base;
+ u32 max_regions;
+ u32 no_bar_nbits;
+ u16 vendor_id;
+ u16 device_id;
};
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct cdns_pcie_ep {
+ struct cdns_pcie pcie;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
/* Register access */
static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
{
@@ -306,6 +366,23 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
+#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+#else
+static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_CADENCE_EP
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+#else
+static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+#endif
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0ba988b5b5bc..625a031b2193 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -7,9 +7,9 @@ config PCIE_DW
bool
config PCIE_DW_HOST
- bool
+ bool
depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW
+ select PCIE_DW
config PCIE_DW_EP
bool
@@ -224,7 +224,7 @@ config PCIE_HISI_STB
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
bool "MESON PCIe controller"
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 4234ddb4722f..b20651cea09f 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -353,7 +353,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index ca9aa4501e7e..0d151cead1b7 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -58,7 +58,7 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3a5fa26d5e56..f24f79a70d9a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -263,6 +263,7 @@ static const struct ls_pcie_drvdata ls2088_drvdata = {
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index e35e9eaa50ee..3772b02a5c55 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -16,6 +16,7 @@
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
+#include <linux/phy/phy.h>
#include "pcie-designware.h"
@@ -96,12 +97,18 @@ struct meson_pcie_rc_reset {
struct reset_control *apb;
};
+struct meson_pcie_param {
+ bool has_shared_phy;
+};
+
struct meson_pcie {
struct dw_pcie pci;
struct meson_pcie_mem_res mem_res;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
+ struct phy *phy;
+ const struct meson_pcie_param *param;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -123,10 +130,12 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
+ if (!mp->param->has_shared_phy) {
+ mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->phy))
+ return PTR_ERR(mrst->phy);
+ reset_control_deassert(mrst->phy);
+ }
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
@@ -180,27 +189,52 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
if (IS_ERR(mp->mem_res.cfg_base))
return PTR_ERR(mp->mem_res.cfg_base);
- /* Meson SoC has two PCI controllers use same phy register*/
- mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
+	/* Meson AXG SoC has two PCI controllers sharing the same phy register */
+ if (!mp->param->has_shared_phy) {
+ mp->mem_res.phy_base =
+ meson_pcie_get_mem_shared(pdev, mp, "phy");
+ if (IS_ERR(mp->mem_res.phy_base))
+ return PTR_ERR(mp->mem_res.phy_base);
+ }
return 0;
}
-static void meson_pcie_power_on(struct meson_pcie *mp)
+static int meson_pcie_power_on(struct meson_pcie *mp)
{
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ int ret = 0;
+
+ if (mp->param->has_shared_phy) {
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
+	} else {
+		writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+	}
+
+ return 0;
}
-static void meson_pcie_reset(struct meson_pcie *mp)
+static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
-
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
+ int ret = 0;
+
+ if (mp->param->has_shared_phy) {
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
+ } else {
+ reset_control_assert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ }
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -208,6 +242,8 @@ static void meson_pcie_reset(struct meson_pcie *mp)
reset_control_deassert(mrst->port);
reset_control_deassert(mrst->apb);
udelay(PCIE_RESET_DELAY);
+
+ return 0;
}
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
@@ -250,15 +286,17 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
+ if (!mp->param->has_shared_phy) {
+ res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+ if (IS_ERR(res->mipi_gate))
+ return PTR_ERR(res->mipi_gate);
+ }
- res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
- res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
if (IS_ERR(res->clk))
return PTR_ERR(res->clk);
@@ -287,9 +325,9 @@ static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
static void meson_pcie_assert_reset(struct meson_pcie *mp)
{
- gpiod_set_value_cansleep(mp->reset_gpio, 0);
- udelay(500);
gpiod_set_value_cansleep(mp->reset_gpio, 1);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
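
[Editorial note] The swap above is easiest to read remembering that gpiod_set_value_cansleep() takes the *logical* level: presuming the PERST# line is flagged active-low in the device tree, writing 1 asserts reset (drives the pin low) and writing 0 releases it, so the old sequence released and then asserted reset instead of pulsing it. Annotated under that assumption:

	/* Logical levels with an active-low PERST#, as assumed above: */
	gpiod_set_value_cansleep(mp->reset_gpio, 1);	/* assert: pin low    */
	udelay(500);
	gpiod_set_value_cansleep(mp->reset_gpio, 0);	/* release: pin high  */
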
static void meson_pcie_init_dw(struct meson_pcie *mp)
@@ -524,6 +562,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int meson_pcie_probe(struct platform_device *pdev)
{
+ const struct meson_pcie_param *match_data;
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
@@ -537,6 +576,19 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ match_data = of_device_get_match_data(dev);
+ if (!match_data) {
+ dev_err(dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+ mp->param = match_data;
+
+ if (mp->param->has_shared_phy) {
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy))
+ return PTR_ERR(mp->phy);
+ }
+
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(mp->reset_gpio)) {
dev_err(dev, "get reset gpio failed\n");
@@ -555,13 +607,22 @@ static int meson_pcie_probe(struct platform_device *pdev)
return ret;
}
- meson_pcie_power_on(mp);
- meson_pcie_reset(mp);
+ ret = meson_pcie_power_on(mp);
+ if (ret) {
+ dev_err(dev, "phy power on failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_reset(mp);
+ if (ret) {
+ dev_err(dev, "reset failed, %d\n", ret);
+ goto err_phy;
+ }
ret = meson_pcie_probe_clocks(mp);
if (ret) {
dev_err(dev, "init clock resources failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
platform_set_drvdata(pdev, mp);
@@ -569,15 +630,36 @@ static int meson_pcie_probe(struct platform_device *pdev)
ret = meson_add_pcie_port(mp, pdev);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
return 0;
+
+err_phy:
+ if (mp->param->has_shared_phy) {
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+ }
+
+ return ret;
}
+static struct meson_pcie_param meson_pcie_axg_param = {
+ .has_shared_phy = false,
+};
+
+static struct meson_pcie_param meson_pcie_g12a_param = {
+ .has_shared_phy = true,
+};
+
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
+ .data = &meson_pcie_axg_param,
+ },
+ {
+ .compatible = "amlogic,g12a-pcie",
+ .data = &meson_pcie_g12a_param,
},
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index d00252bd8fae..9e2482bd7b6d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -422,7 +422,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_wait_for_phy(artpec6_pcie);
artpec6_pcie_set_nfts(artpec6_pcie);
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 0f36a926059a..395feb8ca051 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -10,6 +10,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
@@ -78,7 +79,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
int i, pos, irq;
- u32 val, num_ctrls;
+ unsigned long val;
+ u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
@@ -86,14 +88,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &val);
- if (!val)
+ 4, &status);
+ if (!status)
continue;
ret = IRQ_HANDLED;
+ val = status;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val,
- MAX_MSI_IRQS_PER_CTRL,
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
irq = irq_find_mapping(pp->irq_domain,
(i * MAX_MSI_IRQS_PER_CTRL) +
@@ -319,7 +321,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource_entry *win, *tmp;
+ struct resource_entry *win;
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
@@ -342,31 +344,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (!bridge)
return -ENOMEM;
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &bridge->windows, &pp->io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret)
return ret;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
+ resource_list_for_each_entry(win, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- ret = devm_pci_remap_iospace(dev, win->res,
- pp->io_base);
- if (ret) {
- dev_warn(dev, "Error %d: failed to map resource %pR\n",
- ret, win->res);
- resource_list_destroy_entry(win);
- } else {
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- }
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ pp->io_base = pci_pio_to_address(pp->io->start);
break;
case IORESOURCE_MEM:
pp->mem = win->res;
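
[Editorial note] One more note on the dw_handle_msi_irq() hunk near the top of this file: find_next_bit() walks unsigned long words, so the old cast of a u32's address made it read past the variable on 64-bit machines. The fix reads the register into a u32 and copies it into a full-width bitmap word first. The safe pattern, sketched with msi_status_reg and domain as stand-ins for the driver's real state:

	u32 status;
	unsigned long val;
	int pos = 0;

	status = readl(msi_status_reg);		/* illustrative accessor */
	val = status;				/* widen to a bitmap word */
	while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
				    pos)) != MAX_MSI_IRQS_PER_CTRL) {
		generic_handle_irq(irq_find_mapping(domain, pos));
		pos++;
	}
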
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index b58fdcbc664b..73646b677aff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -70,7 +70,7 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 5a18e94e52c8..5accdd6bc388 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -214,7 +214,7 @@ struct dw_pcie_ep {
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
- u8 bar_to_atu[6];
+ u8 bar_to_atu[PCI_STD_NUM_BARS];
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index f89f5acee72d..cbe95f0ea0ca 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -40,8 +40,6 @@
#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10)
#define APPL_CTRL 0x4
#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
@@ -1193,8 +1191,8 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
if (!pcie->supports_clkreq) {
val = appl_readl(pcie, APPL_PINMUX);
- val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
- val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
+ val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
appl_writel(pcie, val, APPL_PINMUX);
}
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 3f30ee4a00b3..8fd7badd59c2 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -33,6 +33,10 @@
#define PCL_PIPEMON 0x0044
#define PCL_PCLK_ALIVE BIT(15)
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
#define PCL_APP_READY_CTRL 0x8008
#define PCL_APP_LTSSM_ENABLE BIT(0)
@@ -85,6 +89,12 @@ static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
{
u32 val;
+ /* set RC MODE */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN;
+ val &= ~PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
/* use auxiliary power detection */
val = readl(priv->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index fc0fe4d4de49..2a20b649f40c 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -175,18 +176,20 @@
(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
-#define PIO_TIMEOUT_MS 1
+#define PIO_RETRY_CNT 500
+#define PIO_RETRY_DELAY 2 /* 2 us */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
+#define RETRAIN_WAIT_MAX_RETRIES 10
+#define RETRAIN_WAIT_USLEEP_US 2000
#define MSI_IRQ_NUM 32
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
- struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
struct irq_domain *msi_domain;
@@ -239,6 +242,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+ size_t retries;
+
+ for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+ if (!advk_pcie_link_up(pcie))
+ break;
+ udelay(RETRAIN_WAIT_USLEEP_US);
+ }
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
@@ -324,6 +338,14 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
+ /*
+ * PERST# signal could have been asserted by the pinctrl subsystem before
+ * the probe() callback was called, putting the endpoint into fundamental
+ * reset. The PCI Express spec requires a delay of at least 100ms after
+ * such a reset before link training can begin.
+ */
+ msleep(PCI_PM_D3COLD_WAIT);
+
/* Start link training */
reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
reg |= PCIE_CORE_LINK_TRAINING;
@@ -383,17 +405,16 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+ int i;
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < PIO_RETRY_CNT; i++) {
u32 start, isr;
start = advk_readl(pcie, PIO_START);
isr = advk_readl(pcie, PIO_ISR);
if (!start && isr)
return 0;
+ udelay(PIO_RETRY_DELAY);
}
dev_err(dev, "config read/write timed out\n");
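The retry budget is unchanged in total: 500 iterations of a 2 us udelay() give roughly the same 1 ms ceiling as the old PIO_TIMEOUT_MS, but the counted loop also terminates when config space is polled with interrupts disabled, where jiffies never advances and the old time_before() test could spin forever. The resulting shape, with pio_done() a hypothetical stand-in for the PIO_START/PIO_ISR check:

    int i;

    for (i = 0; i < PIO_RETRY_CNT; i++) {
            if (pio_done(pcie))     /* start cleared and isr set */
                    return 0;
            udelay(PIO_RETRY_DELAY);
    }
    return -ETIMEDOUT;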
@@ -415,7 +436,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -426,11 +447,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
+ case PCI_EXP_LNKCTL: {
+ /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+ ~(PCI_EXP_LNKSTA_LT << 16);
+ if (!advk_pcie_link_up(pcie))
+ val |= (PCI_EXP_LNKSTA_LT << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_LNKCAP:
- case PCI_EXP_LNKCTL:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
default:
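Link Control sits at offset 0x10 of the PCIe capability and Link Status at 0x12, so the 32-bit read in the new LNKCTL case covers both registers at once; the emulation then has to synthesize the Link Training bit in the LNKSTA half from the real link state. Reduced to the bit manipulation, with read_pciecap32() a hypothetical stand-in for the advk_readl() access:

    u32 val = read_pciecap32(pcie, PCI_EXP_LNKCTL); /* LNKSTA in bits 31:16 */

    val &= ~(PCI_EXP_LNKSTA_LT << 16);      /* drop the hardware LT bit */
    if (!advk_pcie_link_up(pcie))
            val |= PCI_EXP_LNKSTA_LT << 16; /* report training in progress */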
@@ -447,14 +477,24 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_EXP_DEVCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
case PCI_EXP_LNKCTL:
advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ if (new & PCI_EXP_LNKCTL_RL)
+ advk_pcie_wait_for_retrain(pcie);
break;
- case PCI_EXP_RTCTL:
- new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
- advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ case PCI_EXP_RTCTL: {
+ /* Only mask/unmask PME interrupt */
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+ ~PCIE_MSG_PM_PME_MASK;
+ if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+ val |= PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
break;
+ }
case PCI_EXP_RTSTA:
new = (new & PCI_EXP_RTSTA_PME) >> 9;
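Both RTCTL fixes in this file follow from PCIE_ISR0_MASK_REG having masked-when-set semantics: an enabled PME interrupt corresponds to a cleared mask bit, which the old read test and the old write logic each had inverted (the old write also clobbered every other mask bit). Roughly:

    /* read: interrupt enabled <=> mask bit clear */
    *value = (mask & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;

    /* write: clear the mask bit to enable, set it to disable */
    if (new & PCI_EXP_RTCTL_PMEIE)
            mask &= ~PCIE_MSG_PM_PME_MASK;
    else
            mask |= PCIE_MSG_PM_PME_MASK;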
@@ -479,18 +519,20 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
- bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
- bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+ bridge->conf.vendor =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+ bridge->conf.device =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
bridge->conf.class_revision =
- advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+ cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
/* Support 32 bits I/O addressing */
bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
/* Support 64 bits memory pref */
- bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
- bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+ bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
@@ -910,63 +952,11 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
-{
- int err, res_valid = 0;
- struct device *dev = &pcie->pdev->dev;
- struct resource_entry *win, *tmp;
- resource_size_t iobase;
-
- INIT_LIST_HEAD(&pcie->resources);
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
- struct resource *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- break;
- case IORESOURCE_BUS:
- pcie->root_bus_nr = res->start;
- break;
- }
- }
-
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
-
- return 0;
-
-out_release_res:
- pci_free_resource_list(&pcie->resources);
- return err;
-}
-
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
- struct resource *res;
+ struct resource *res, *bus;
struct pci_host_bridge *bridge;
int ret, irq;
@@ -991,11 +981,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, &bus);
if (ret) {
dev_err(dev, "Failed to parse resources\n");
return ret;
}
+ pcie->root_bus_nr = bus->start;
advk_pcie_setup_hw(pcie);
@@ -1014,7 +1006,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = 0;
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index bf5ece5d9291..1b67564de7af 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -375,12 +375,11 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
-static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
- struct device_node *np)
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *entry;
u32 confreg[3] = {
FARADAY_PCI_MEM1_BASE_SIZE,
FARADAY_PCI_MEM2_BASE_SIZE,
@@ -389,19 +388,13 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /*
- * Get the dma-ranges from the device tree
- */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.pci_addr + range.size - 1;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pci_addr = entry->res->start - entry->offset;
+ u64 end = entry->res->end - entry->offset;
int ret;
- ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+ ret = faraday_res_to_memcfg(pci_addr,
+ resource_size(entry->res), &val);
if (ret) {
dev_err(dev,
"DMA range %d: illegal MEM resource size\n", i);
@@ -409,7 +402,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
}
dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
- i + 1, range.pci_addr, end, val);
+ i + 1, pci_addr, end, val);
if (i <= 2) {
faraday_raw_pci_write_config(p, 0, 0, confreg[i],
4, val);
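This open-coded translation recurs wherever the series drops the of_pci_range parser in favor of the bridge's pre-parsed dma_ranges list (the v3-semi, X-Gene and iProc hunks below do the same). A resource_entry stores CPU addresses plus an offset, from which the PCI-side view is recovered:

    /* entry->offset == cpu_addr - pci_addr for the whole range */
    u64 pci_addr = entry->res->start - entry->offset;
    u64 pci_end  = entry->res->end - entry->offset;
    u64 size     = resource_size(entry->res);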
@@ -430,10 +423,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
const struct faraday_pci_variant *variant =
of_device_get_match_data(dev);
struct resource *regs;
- resource_size_t io_base;
struct resource_entry *win;
struct faraday_pci *p;
- struct resource *mem;
struct resource *io;
struct pci_host_bridge *host;
struct clk *clk;
@@ -441,7 +432,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
unsigned char cur_bus_speed = PCI_SPEED_33MHz;
int ret;
u32 val;
- LIST_HEAD(res);
host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
if (!host)
@@ -480,44 +470,21 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &res, &io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
+ &host->dma_ranges, NULL);
if (ret)
return ret;
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "Gemini PCI I/O";
- if (!faraday_res_to_memcfg(io->start - win->offset,
- resource_size(io), &val)) {
- /* setup I/O space size */
- writel(val, p->base + PCI_IOSIZE);
- } else {
- dev_err(dev, "illegal IO mem size\n");
- return -EINVAL;
- }
- ret = devm_pci_remap_iospace(dev, io, io_base);
- if (ret) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- ret, io);
- continue;
- }
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "Gemini PCI MEM";
- break;
- case IORESOURCE_BUS:
- break;
- default:
- break;
+ win = resource_list_first_type(&host->windows, IORESOURCE_IO);
+ if (win) {
+ io = win->res;
+ if (!faraday_res_to_memcfg(io->start - win->offset,
+ resource_size(io), &val)) {
+ /* setup I/O space size */
+ writel(val, p->base + PCI_IOSIZE);
+ } else {
+ dev_err(dev, "illegal IO mem size\n");
+ return -EINVAL;
}
}
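resource_list_first_type(), also used by the MediaTek hunk below, picks the first window of a given type out of the bridge list, replacing a switch over every window when only one resource matters. A minimal sketch for the I/O case:

    struct resource_entry *win;

    win = resource_list_first_type(&host->windows, IORESOURCE_IO);
    if (win) {
            struct resource *io = win->res;
            /* io->start - win->offset is the PCI bus address */
    }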
@@ -565,11 +532,10 @@ static int faraday_pci_probe(struct platform_device *pdev)
cur_bus_speed = PCI_SPEED_66MHz;
}
- ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+ ret = faraday_pci_parse_map_dma_ranges(p);
if (ret)
return ret;
- list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to scan host: %d\n", ret);
@@ -581,7 +547,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
pci_bus_assign_resources(p->bus);
pci_bus_add_devices(p->bus);
- pci_free_resource_list(&res);
return 0;
}
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index c8cb9c5188a4..250a3fc80ec6 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -27,7 +27,7 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
struct pci_config_window *cfg;
/* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return ERR_PTR(err);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index f1f300218fab..9977abff92fc 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -76,11 +76,6 @@ static enum pci_protocol_version_t pci_protocol_versions[] = {
PCI_PROTOCOL_VERSION_1_1,
};
-/*
- * Protocol version negotiated by hv_pci_protocol_negotiation().
- */
-static enum pci_protocol_version_t pci_protocol_version;
-
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -307,7 +302,7 @@ struct pci_bus_relations {
struct pci_q_res_req_response {
struct vmpacket_descriptor hdr;
s32 status; /* negative values are failures */
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;
struct pci_set_power {
@@ -455,12 +450,15 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
+ hv_pcibus_removing,
hv_pcibus_removed,
hv_pcibus_maximum
};
struct hv_pcibus_device {
struct pci_sysdata sysdata;
+ /* Protocol version negotiated with the host */
+ enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
refcount_t remove_lock;
struct hv_device *hdev;
@@ -539,7 +537,7 @@ struct hv_pci_dev {
* What would be observed if one wrote 0xFFFFFFFF to a BAR and then
* read it back, for each of the BAR offsets within config space.
*/
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
};
struct hv_pci_compl {
@@ -1224,7 +1222,7 @@ static void hv_irq_unmask(struct irq_data *data)
* negative effect (yet?).
*/
- if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
* HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
@@ -1394,7 +1392,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
ctxt.pci_pkt.compl_ctxt = &comp;
- switch (pci_protocol_version) {
+ switch (hbus->protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
@@ -1610,7 +1608,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* so it's sufficient to just add them up without tracking alignment.
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
"There's an I/O BAR in this list!\n");
@@ -1681,10 +1679,27 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_lock_irqsave(&hbus->device_list_lock, flags);
+ /*
+ * Clear the memory enable bit, in case it's already set. This occurs
+ * in the suspend path of hibernation, where the device is suspended,
+ * resumed and suspended again: see hibernation_snapshot() and
+ * hibernation_platform_enter().
+ *
+ * If the memory enable bit is already set, Hyper-V silently ignores
+ * the BAR updates below, and the related PCI device driver cannot
+ * work, because reading from the device register(s) always returns
+ * 0xFFFFFFFF.
+ */
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
+ }
+
/* Pick addresses for the BARs. */
do {
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
continue;
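Disabling the decode bit before touching a BAR is the standard ordering (a BAR may only be rewritten while its decode is off); here it additionally keeps Hyper-V from silently dropping the update, per the comment above. The generic read-modify-write shape, using the driver's own _hv_pcifront_*_config() helpers:

    u32 command;

    _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
    command &= ~PCI_COMMAND_MEMORY;         /* memory decode off */
    _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
    /* ...BAR writes follow while decode is disabled... */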
@@ -1841,7 +1856,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
"query resource requirements failed: %x\n",
resp->status);
} else {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
completion->hpdev->probed_bar[i] =
q_res_req->probed_bar[i];
}
@@ -2107,6 +2122,12 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
unsigned long flags;
bool pending_dr;
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hbus->hdev->device,
+ "PCI VMBus BUS_RELATIONS: ignored\n");
+ return;
+ }
+
dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
if (!dr_wrk)
return;
@@ -2223,11 +2244,19 @@ static void hv_eject_device_work(struct work_struct *work)
*/
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct hv_device *hdev = hbus->hdev;
+
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
+ return;
+ }
+
hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
- get_hvpcibus(hpdev->hbus);
- queue_work(hpdev->hbus->wq, &hpdev->wrk);
+ get_hvpcibus(hbus);
+ queue_work(hbus->wq, &hpdev->wrk);
}
/**
@@ -2379,8 +2408,11 @@ static void hv_pci_onchannelcallback(void *context)
* failing if the host doesn't support the necessary protocol
* level.
*/
-static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+static int hv_pci_protocol_negotiation(struct hv_device *hdev,
+ enum pci_protocol_version_t version[],
+ int num_version)
{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_version_request *version_req;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
@@ -2403,8 +2435,8 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
version_req = (struct pci_version_request *)&pkt->message;
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
- for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
- version_req->protocol_version = pci_protocol_versions[i];
+ for (i = 0; i < num_version; i++) {
+ version_req->protocol_version = version[i];
ret = vmbus_sendpacket(hdev->channel, version_req,
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
@@ -2420,10 +2452,10 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
}
if (comp_pkt.completion_status >= 0) {
- pci_protocol_version = pci_protocol_versions[i];
+ hbus->protocol_version = version[i];
dev_info(&hdev->device,
"PCI VMBus probing: Using version %#x\n",
- pci_protocol_version);
+ hbus->protocol_version);
goto exit;
}
@@ -2707,7 +2739,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
u32 wslot;
int ret;
- size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+ size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
? sizeof(*res_assigned) : sizeof(*res_assigned2);
pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
@@ -2726,7 +2758,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
(struct pci_resources_assigned *)&pkt->message;
res_assigned->message_type.type =
@@ -2870,9 +2902,27 @@ static int hv_pci_probe(struct hv_device *hdev,
* hv_pcibus_device contains the hypercall arguments for retargeting in
* hv_irq_unmask(). Those must not cross a page boundary.
*/
- BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
- hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
+ /*
+ * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
+ * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
+ * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
+ * alignment of hbus is important because hbus's field
+ * retarget_msi_interrupt_params must not cross a 4KB page boundary.
+ *
+ * Here we prefer kzalloc to get_zeroed_page(), because a buffer
+ * allocated by the latter is not tracked and scanned by kmemleak, and
+ * hence kmemleak reports the pointer contained in the hbus buffer
+ * (i.e. the hpdev struct, which is created in new_pcichild_device() and
+ * is tracked by hbus->children) as a memory leak (false positive).
+ *
+ * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
+ * used to allocate the hbus buffer and we can avoid the kmemleak false
+ * positive by using kmemleak_alloc() and kmemleak_free() to ask
+ * kmemleak to track and scan the hbus buffer.
+ */
+ hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
@@ -2930,7 +2980,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hv_set_drvdata(hdev, hbus);
- ret = hv_pci_protocol_negotiation(hdev);
+ ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
+ ARRAY_SIZE(pci_protocol_versions));
if (ret)
goto close;
@@ -3011,7 +3062,7 @@ free_bus:
return ret;
}
-static void hv_pci_bus_exit(struct hv_device *hdev)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct {
@@ -3027,16 +3078,20 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
* access the per-channel ringbuffer any longer.
*/
if (hdev->channel->rescind)
- return;
+ return 0;
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
+ if (!hibernating) {
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+ }
ret = hv_send_resources_released(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->device,
"Couldn't send resources released packet(s)\n");
+ return ret;
+ }
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
@@ -3049,8 +3104,13 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
(unsigned long)&pkt.teardown_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (!ret)
- wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+ if (ret)
+ return ret;
+
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
+ return -ETIMEDOUT;
+
+ return 0;
}
/**
@@ -3062,6 +3122,7 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
static int hv_pci_remove(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus;
+ int ret;
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
@@ -3074,7 +3135,7 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus->state = hv_pcibus_removed;
}
- hv_pci_bus_exit(hdev);
+ ret = hv_pci_bus_exit(hdev, false);
vmbus_close(hdev->channel);
@@ -3090,10 +3151,97 @@ static int hv_pci_remove(struct hv_device *hdev)
hv_put_dom_num(hbus->sysdata.domain);
- free_page((unsigned long)hbus);
+ kfree(hbus);
+ return ret;
+}
+
+static int hv_pci_suspend(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum hv_pcibus_state old_state;
+ int ret;
+
+ /*
+ * hv_pci_suspend() must make sure there are no pending work items
+ * before calling vmbus_close(), since it runs in a process context
+ * as a callback in dpm_suspend(). When it starts to run, the channel
+ * callback hv_pci_onchannelcallback(), which runs in a tasklet
+ * context, can be still running concurrently and scheduling new work
+ * items onto hbus->wq in hv_pci_devices_present() and
+ * hv_pci_eject_device(), and the work item handlers can access the
+ * vmbus channel, which can be being closed by hv_pci_suspend(), e.g.
+ * the work item handler pci_devices_present_work() ->
+ * new_pcichild_device() writes to the vmbus channel.
+ *
+ * To eliminate the race, hv_pci_suspend() disables the channel
+ * callback tasklet, sets hbus->state to hv_pcibus_removing, and
+ * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
+ * it knows that no new work item can be scheduled, and then it flushes
+ * hbus->wq and safely closes the vmbus channel.
+ */
+ tasklet_disable(&hdev->channel->callback_event);
+
+ /* Change the hbus state to prevent new work items. */
+ old_state = hbus->state;
+ if (hbus->state == hv_pcibus_installed)
+ hbus->state = hv_pcibus_removing;
+
+ tasklet_enable(&hdev->channel->callback_event);
+
+ if (old_state != hv_pcibus_installed)
+ return -EINVAL;
+
+ flush_workqueue(hbus->wq);
+
+ ret = hv_pci_bus_exit(hdev, true);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
return 0;
}
+static int hv_pci_resume(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum pci_protocol_version_t version[1];
+ int ret;
+
+ hbus->state = hv_pcibus_init;
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ return ret;
+
+ /* Only use the version that was in use before hibernation. */
+ version[0] = hbus->protocol_version;
+ ret = hv_pci_protocol_negotiation(hdev, version, 1);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto out;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto out;
+
+ prepopulate_bars(hbus);
+
+ hbus->state = hv_pcibus_installed;
+ return 0;
+out:
+ vmbus_close(hdev->channel);
+ return ret;
+}
+
static const struct hv_vmbus_device_id hv_pci_id_table[] = {
/* PCI Pass-through Class ID */
/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
@@ -3108,6 +3256,8 @@ static struct hv_driver hv_pci_drv = {
.id_table = hv_pci_id_table,
.probe = hv_pci_probe,
.remove = hv_pci_remove,
+ .suspend = hv_pci_suspend,
+ .resume = hv_pci_resume,
};
static void __exit exit_hv_pci_drv(void)
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index d3a0419e42f2..153a64676bc9 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -554,7 +554,7 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
-struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
.write_base = mvebu_pci_bridge_emul_base_conf_write,
.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
@@ -713,7 +713,7 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
ret = of_address_to_resource(np, 0, &regs);
if (ret)
- return ERR_PTR(ret);
+ return (void __iomem *)ERR_PTR(ret);
return devm_ioremap_resource(&pdev->dev, &regs);
}
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f127ce8bd4ef..9491e266b1ea 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-acpi.h>
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index d219404bad92..bd05221f5a22 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -241,10 +241,8 @@ struct v3_pci {
void __iomem *config_base;
struct pci_bus *bus;
u32 config_mem;
- u32 io_mem;
u32 non_pre_mem;
u32 pre_mem;
- phys_addr_t io_bus_addr;
phys_addr_t non_pre_bus_addr;
phys_addr_t pre_bus_addr;
struct regmap *map;
@@ -520,35 +518,22 @@ static int v3_integrator_init(struct v3_pci *v3)
}
static int v3_pci_setup_resource(struct v3_pci *v3,
- resource_size_t io_base,
struct pci_host_bridge *host,
struct resource_entry *win)
{
struct device *dev = v3->dev;
struct resource *mem;
struct resource *io;
- int ret;
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
- io->name = "V3 PCI I/O";
- v3->io_mem = io_base;
- v3->io_bus_addr = io->start - win->offset;
- dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
- io, &v3->io_bus_addr);
- ret = devm_pci_remap_iospace(dev, io, io_base);
- if (ret) {
- dev_warn(dev,
- "error %d: failed to map resource %pR\n",
- ret, io);
- return ret;
- }
+
/* Setup window 2 - PCI I/O */
- writel(v3_addr_to_lb_base2(v3->io_mem) |
+ writel(v3_addr_to_lb_base2(pci_pio_to_address(io->start)) |
V3_LB_BASE2_ENABLE,
v3->base + V3_LB_BASE2);
- writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+ writew(v3_addr_to_lb_map2(io->start - win->offset),
v3->base + V3_LB_MAP2);
break;
case IORESOURCE_MEM:
@@ -613,28 +598,30 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
}
static int v3_get_dma_range_config(struct v3_pci *v3,
- struct of_pci_range *range,
+ struct resource_entry *entry,
u32 *pci_base, u32 *pci_map)
{
struct device *dev = v3->dev;
- u64 cpu_end = range->cpu_addr + range->size - 1;
- u64 pci_end = range->pci_addr + range->size - 1;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_end = cpu_end - entry->offset;
+ u64 pci_addr = entry->res->start - entry->offset;
u32 val;
- if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ if (pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
return -EINVAL;
}
- val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ val = ((u32)pci_addr) & V3_PCI_BASE_M_ADR_BASE;
*pci_base = val;
- if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ if (cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
return -EINVAL;
}
- val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+ val = ((u32)cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
- switch (range->size) {
+ switch (resource_size(entry->res)) {
case SZ_1M:
val |= V3_LB_BASE_ADR_SIZE_1MB;
break;
@@ -682,8 +669,8 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
dev_dbg(dev,
"DMA MEM CPU: 0x%016llx -> 0x%016llx => "
"PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
- range->cpu_addr, cpu_end,
- range->pci_addr, pci_end,
+ cpu_addr, cpu_end,
+ pci_addr, pci_end,
*pci_base, *pci_map);
return 0;
@@ -692,24 +679,16 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
struct device_node *np)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(v3);
struct device *dev = v3->dev;
+ struct resource_entry *entry;
int i = 0;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /*
- * Get the dma-ranges from the device tree
- */
- for_each_of_pci_range(&parser, &range) {
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
int ret;
u32 pci_base, pci_map;
- ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+ ret = v3_get_dma_range_config(v3, entry, &pci_base, &pci_map);
if (ret)
return ret;
@@ -732,7 +711,6 @@ static int v3_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- resource_size_t io_base;
struct resource *regs;
struct resource_entry *win;
struct v3_pci *v3;
@@ -741,7 +719,6 @@ static int v3_pci_probe(struct platform_device *pdev)
u16 val;
int irq;
int ret;
- LIST_HEAD(res);
host = pci_alloc_host_bridge(sizeof(*v3));
if (!host)
@@ -793,12 +770,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
+ &host->dma_ranges, NULL);
if (ret)
return ret;
@@ -852,8 +825,8 @@ static int v3_pci_probe(struct platform_device *pdev)
writew(val, v3->base + V3_PCI_CMD);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- ret = v3_pci_setup_resource(v3, io_base, host, win);
+ resource_list_for_each_entry(win, &host->windows) {
+ ret = v3_pci_setup_resource(v3, host, win);
if (ret) {
dev_err(dev, "error setting up resources\n");
return ret;
@@ -931,7 +904,6 @@ static int v3_pci_probe(struct platform_device *pdev)
val |= V3_SYSTEM_M_LOCK;
writew(val, v3->base + V3_SYSTEM);
- list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to register host: %d\n", ret);
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index f59ad2728c0b..b911359b6d81 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -62,65 +62,16 @@ static struct pci_ops pci_versatile_ops = {
.write = pci_generic_config_write,
};
-static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *res)
-{
- int err, mem = 1, res_valid = 0;
- resource_size_t iobase;
- struct resource_entry *win, *tmp;
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, res);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, res) {
- struct resource *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
-
- writel(res->start >> 28, PCI_IMAP(mem));
- writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
- mem++;
-
- break;
- }
- }
-
- if (res_valid)
- return 0;
-
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
-
-out_release_res:
- pci_free_resource_list(res);
- return err;
-}
-
static int versatile_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
- int ret, i, myslot = -1;
+ struct resource_entry *entry;
+ int ret, i, myslot = -1, mem = 1;
u32 val;
void __iomem *local_pci_cfg_base;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
- LIST_HEAD(pci_res);
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
@@ -141,10 +92,19 @@ static int versatile_pci_probe(struct platform_device *pdev)
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
- ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ NULL, NULL);
if (ret)
return ret;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ writel(entry->res->start >> 28, PCI_IMAP(mem));
+ writel(__pa(PAGE_OFFSET) >> 28, PCI_SMAP(mem));
+ mem++;
+ }
+ }
+
/*
* We need to discover the PCI core first to configure itself
* before the main PCI probing is performed
@@ -177,9 +137,9 @@ static int versatile_pci_probe(struct platform_device *pdev)
/*
* Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
*/
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_2);
/*
* For many years the kernel and QEMU were symbiotically buggy
@@ -197,7 +157,6 @@ static int versatile_pci_probe(struct platform_device *pdev)
pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
- list_splice_init(&pci_res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = NULL;
bridge->busnr = 0;
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index ffda3e8b4742..de195fd430dc 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -405,15 +405,13 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
-static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
- struct list_head *res,
- resource_size_t io_base)
+static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *window;
struct device *dev = port->dev;
- int ret;
- resource_list_for_each_entry(window, res) {
+ resource_list_for_each_entry(window, &bridge->windows) {
struct resource *res = window->res;
u64 restype = resource_type(res);
@@ -421,11 +419,9 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
switch (restype) {
case IORESOURCE_IO:
- xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
+ xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
+ pci_pio_to_address(res->start),
res->start - window->offset);
- ret = devm_pci_remap_iospace(dev, res, io_base);
- if (ret < 0)
- return ret;
break;
case IORESOURCE_MEM:
if (res->flags & IORESOURCE_PREFETCH)
@@ -485,27 +481,28 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
}
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
- struct of_pci_range *range, u8 *ib_reg_mask)
+ struct resource_entry *entry,
+ u8 *ib_reg_mask)
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
u32 pim_reg;
- u64 cpu_addr = range->cpu_addr;
- u64 pci_addr = range->pci_addr;
- u64 size = range->size;
+ u64 cpu_addr = entry->res->start;
+ u64 pci_addr = cpu_addr - entry->offset;
+ u64 size = resource_size(entry->res);
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
- region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
if (region < 0) {
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
- if (range->flags & IORESOURCE_PREFETCH)
+ if (entry->res->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
@@ -536,25 +533,13 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
- struct device_node *np = port->node;
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- struct device *dev = port->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
+ struct resource_entry *entry;
u8 ib_reg_mask = 0;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /* Get the dma-ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.cpu_addr + range.size - 1;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges)
+ xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
- dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
- range.flags, range.cpu_addr, end, range.pci_addr);
- xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
- }
return 0;
}
@@ -567,8 +552,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
-static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
- resource_size_t io_base)
+static int xgene_pcie_setup(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
@@ -580,7 +564,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
- ret = xgene_pcie_map_ranges(port, res, io_base);
+ ret = xgene_pcie_map_ranges(port);
if (ret)
return ret;
@@ -607,11 +591,9 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
- resource_size_t iobase = 0;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret;
- LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
@@ -634,20 +616,15 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(dev, &res);
- if (ret)
- goto error;
-
- ret = xgene_pcie_setup(port, &res, iobase);
+ ret = xgene_pcie_setup(port);
if (ret)
- goto error;
+ return ret;
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@@ -657,7 +634,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -666,10 +643,6 @@ static int xgene_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return ret;
}
static const struct of_device_id xgene_pcie_match_table[] = {
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index d2497ca43828..b447c3e4abad 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -92,7 +92,6 @@ struct altera_pcie {
u8 root_bus_nr;
struct irq_domain *irq_domain;
struct resource bus_range;
- struct list_head resources;
const struct altera_pcie_data *pcie_data;
};
@@ -670,39 +669,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
-{
- int err, res_valid = 0;
- struct device *dev = &pcie->pdev->dev;
- struct resource_entry *win;
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, NULL);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry(win, &pcie->resources) {
- struct resource *res = win->res;
-
- if (resource_type(res) == IORESOURCE_MEM)
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- }
-
- if (res_valid)
- return 0;
-
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
-
-out_release_res:
- pci_free_resource_list(&pcie->resources);
- return err;
-}
-
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
@@ -833,9 +799,8 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
- ret = altera_pcie_parse_request_of_pci_ranges(pcie);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Failed add resources\n");
return ret;
@@ -853,7 +818,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -884,7 +848,6 @@ static int altera_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
- pci_free_resource_list(&pcie->resources);
altera_pcie_irq_teardown(pcie);
return 0;
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 0a3f61be5625..3176ad3ab0e5 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -293,11 +293,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
- u32 *msg, hwirq;
+ u32 __iomem *msg;
+ u32 hwirq;
unsigned int offs;
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
- msg = (u32 *)(msi->eq_cpu + offs);
+ msg = (u32 __iomem *)(msi->eq_cpu + offs);
hwirq = readl(msg);
hwirq = (hwirq >> 5) + (hwirq & 0x1f);
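The __iomem cast appears aimed at sparse's address-space checking: readl() expects an __iomem pointer, and annotating msg documents that the event queue entry is fetched through the accessor, keeping its 32-bit, ordered-read guarantees rather than relying on a plain dereference. Reduced to its core:

    u32 __iomem *msg = (u32 __iomem *)(msi->eq_cpu + offs);
    u32 hwirq = readl(msg);         /* accessor read, never *msg */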
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 9ee6200a66f4..ff0a81a632a1 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -43,8 +43,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
struct iproc_pcie *pcie;
struct device_node *np = dev->of_node;
struct resource reg;
- resource_size_t iobase = 0;
- LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@@ -97,8 +95,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
- &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
@@ -113,10 +111,9 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->map_irq = of_irq_parse_and_map_pci;
}
- ret = iproc_pcie_setup(pcie, &resources);
+ ret = iproc_pcie_setup(pcie, &bridge->windows);
if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
- pci_free_resource_list(&resources);
return ret;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 2d457bfdaf66..0a468c73bae3 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -1122,15 +1122,16 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
}
static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
- struct of_pci_range *range,
+ struct resource_entry *entry,
enum iproc_pcie_ib_map_type type)
{
struct device *dev = pcie->dev;
struct iproc_pcie_ib *ib = &pcie->ib;
int ret;
unsigned int region_idx, size_idx;
- u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
- resource_size_t size = range->size;
+ u64 axi_addr = entry->res->start;
+ u64 pci_addr = entry->res->start - entry->offset;
+ resource_size_t size = resource_size(entry->res);
/* iterate through all IARR mapping regions */
for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
@@ -1182,67 +1183,46 @@ err_ib:
return ret;
}
-static int iproc_pcie_add_dma_range(struct device *dev,
- struct list_head *resources,
- struct of_pci_range *range)
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
- struct resource *res;
- struct resource_entry *entry, *tmp;
- struct list_head *head = resources;
-
- res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int ret = 0;
- resource_list_for_each_entry(tmp, resources) {
- if (tmp->res->start < range->cpu_addr)
- head = &tmp->node;
+ resource_list_for_each_entry(entry, &host->dma_ranges) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ break;
}
- res->start = range->cpu_addr;
- res->end = res->start + range->size - 1;
-
- entry = resource_list_create_entry(res, 0);
- if (!entry)
- return -ENOMEM;
-
- entry->offset = res->start - range->cpu_addr;
- resource_list_add(entry, head);
-
- return 0;
+ return ret;
}
-static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int ret;
- LIST_HEAD(resources);
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ int idx;
- /* Get the dma-ranges from DT */
- ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
- if (ret)
- return ret;
+ if (pcie->ep_is_internal)
+ return;
- for_each_of_pci_range(&parser, &range) {
- ret = iproc_pcie_add_dma_range(pcie->dev,
- &resources,
- &range);
- if (ret)
- goto out;
- /* Each range entry corresponds to an inbound mapping region */
- ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
- if (ret)
- goto out;
+ if (pcie->need_ob_cfg) {
+ /* iterate through all OARR mapping regions */
+ for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_OARR0, idx), 0);
+ }
}
- list_splice_init(&resources, &host->dma_ranges);
-
- return 0;
-out:
- pci_free_resource_list(&resources);
- return ret;
+ if (pcie->need_ib_cfg) {
+ /* iterate through all IARR mapping regions */
+ for (idx = 0; idx < ib->nr_regions; idx++) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, idx), 0);
+ }
+ }
}
static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
@@ -1276,13 +1256,16 @@ static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
int ret;
- struct of_pci_range range;
+ struct resource_entry entry;
- memset(&range, 0, sizeof(range));
- range.size = SZ_32K;
- range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+ memset(&entry, 0, sizeof(entry));
+ entry.res = &entry.__res;
- ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+ msi_addr &= ~(SZ_32K - 1);
+ entry.res->start = msi_addr;
+ entry.res->end = msi_addr + SZ_32K - 1;
+
+ ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
return ret;
}
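With no device-tree range to iterate for the 32 KB MSI window, the hunk fabricates a resource_entry on the stack and points res at the entry's embedded __res field, the same storage resource_list_create_entry() falls back to when no resource is supplied. A sketch:

    struct resource_entry entry = {};

    entry.res = &entry.__res;               /* embedded storage */
    entry.res->start = msi_addr & ~(SZ_32K - 1);
    entry.res->end = entry.res->start + SZ_32K - 1;
    ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);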
@@ -1498,10 +1481,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
return ret;
}
- ret = devm_request_pci_bus_resources(dev, res);
- if (ret)
- return ret;
-
ret = phy_init(pcie->phy);
if (ret) {
dev_err(dev, "unable to initialize PCIe PHY\n");
@@ -1517,6 +1496,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
iproc_pcie_perst_ctrl(pcie, true);
iproc_pcie_perst_ctrl(pcie, false);
+ iproc_pcie_invalidate_mapping(pcie);
+
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
@@ -1543,7 +1524,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (iproc_pcie_msi_enable(pcie))
dev_info(dev, "not using iProc MSI\n");
- list_splice_init(res, &host->windows);
host->busnr = 0;
host->dev.parent = dev;
host->ops = &iproc_pcie_ops;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 626a7c352dfd..cb982891b22b 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -216,7 +216,6 @@ struct mtk_pcie {
void __iomem *base;
struct clk *free_ck;
- struct resource mem;
struct list_head ports;
const struct mtk_pcie_soc *soc;
unsigned int busnr;
@@ -661,11 +660,19 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- struct resource *mem = &pcie->mem;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource *mem = NULL;
+ struct resource_entry *entry;
const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
int err;
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (entry)
+ mem = entry->res;
+ if (!mem)
+ return -EINVAL;
+
/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
if (pcie->base) {
val = readl(pcie->base + PCIE_SYS_CFG_V2);
@@ -1023,39 +1030,15 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct mtk_pcie_port *port, *tmp;
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct list_head *windows = &host->windows;
- struct resource_entry *win, *tmp_win;
- resource_size_t io_base;
+ struct resource *bus;
int err;
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- windows, &io_base);
+ err = pci_parse_request_of_pci_ranges(dev, windows,
+ &host->dma_ranges, &bus);
if (err)
return err;
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0)
- return err;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp_win, windows) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, win->res, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, win->res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- memcpy(&pcie->mem, win->res, sizeof(*win->res));
- pcie->mem.name = "non-prefetchable";
- break;
- case IORESOURCE_BUS:
- pcie->busnr = win->res->start;
- break;
- }
- }
+ pcie->busnr = bus->start;
for_each_available_child_of_node(node, child) {
int slot;
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a45a6447b01d..3a696ca45bfa 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -140,7 +140,6 @@ struct mobiveil_msi { /* MSI information */
struct mobiveil_pcie {
struct platform_device *pdev;
- struct list_head resources;
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
@@ -235,7 +234,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
void *addr;
u32 val;
@@ -250,7 +249,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
return val;
}
-static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
{
void *addr;
int ret;
@@ -262,19 +262,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
-static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
- return csr_read(pcie, off, 0x4);
+ return mobiveil_csr_read(pcie, off, 0x4);
}
-static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
- csr_write(pcie, val, off, 0x4);
+ mobiveil_csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
{
- return (csr_readl(pcie, LTSSM_STATUS) &
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
}
@@ -323,7 +323,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
- csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
return pcie->config_axi_slave_base + where;
}
@@ -353,13 +353,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
chained_irq_enter(chip, desc);
/* read INTx status */
- val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
- mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
intr_status = val & mask;
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
- shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
do {
@@ -373,12 +374,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
bit);
/* clear interrupt handled */
- csr_writel(pcie, 1 << (PAB_INTX_START + bit),
- PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie,
+ 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
}
- shifted_status = csr_readl(pcie,
- PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
} while (shifted_status != 0);
@@ -413,7 +415,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
}
/* Clear the interrupt status */
- csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
chained_irq_exit(chip, desc);
}
@@ -474,24 +476,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
return;
}
- value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
- csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
pcie->ib_wins_configured++;
}
@@ -515,27 +517,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
- value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
- csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
@@ -575,46 +579,47 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u32 value, pab_ctrl, type;
struct resource_entry *win;
/* setup bus numbers */
- value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
value &= 0xff000000;
value |= 0x00ff0100;
- csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
- value = csr_readl(pcie, PCI_COMMAND);
+ value = mobiveil_csr_readl(pcie, PCI_COMMAND);
value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- csr_writel(pcie, value, PCI_COMMAND);
+ mobiveil_csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
- pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
- csr_writel(pcie, pab_ctrl, PAB_CTRL);
+ mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
- csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
- value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
value |= APIO_EN_MASK;
- csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
/* Enable PCIe PIO master */
- value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
value |= 1 << PIO_ENABLE_SHIFT;
- csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -631,7 +636,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &pcie->resources) {
+ resource_list_for_each_entry(win, &bridge->windows) {
if (resource_type(win->res) == IORESOURCE_MEM)
type = MEM_WINDOW_TYPE;
else if (resource_type(win->res) == IORESOURCE_IO)
@@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
}
/* fixup for PCIe class register */
- value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
value |= (PCI_CLASS_BRIDGE_PCI << 16);
- csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+ mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
@@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -857,7 +862,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct device *dev = &pdev->dev;
- resource_size_t iobase;
int ret;
/* allocate the PCIe port */
@@ -875,11 +879,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
/* parse the host bridge base addresses from the device tree file */
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
return ret;
@@ -892,24 +894,19 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_host_init(pcie);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
- goto error;
+ return ret;
}
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ return ret;
}
irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
- ret = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (ret)
- goto error;
-
/* Initialize bridge */
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -920,13 +917,13 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_bringup_link(pcie);
if (ret) {
dev_info(dev, "link bring-up failed\n");
- goto error;
+ return ret;
}
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -936,9 +933,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-error:
- pci_free_resource_list(&pcie->resources);
- return ret;
}
static const struct of_device_id mobiveil_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index f6a669a9af41..759c6542c5c8 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -30,8 +30,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include "../pci.h"
-
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE BIT(31)
@@ -93,8 +91,11 @@
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
@@ -615,6 +616,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
if (IS_ENABLED(CONFIG_PCI_MSI))
rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
/* Finish initialization - establish a PCI Express link */
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
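The write restores MACCTLR to its documented initial value before CFINIT starts link training, which is why the same write is also added to the resume path further down. Per the defines above, the constant decomposes as:

/*
 * MACCTLR_INIT_VAL = LTSMDIS    | MACCTLR_NFTS_MASK
 *                  = BIT(31)    | GENMASK(23, 16)
 *                  = 0x80000000 | 0x00ff0000
 *                  = 0x80ff0000
 */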
@@ -1014,40 +1017,43 @@ err_irq1:
}
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
- struct of_pci_range *range,
+ struct resource_entry *entry,
int *index)
{
- u64 restype = range->flags;
- u64 cpu_addr = range->cpu_addr;
- u64 cpu_end = range->cpu_addr + range->size;
- u64 pci_addr = range->pci_addr;
+ u64 restype = entry->res->flags;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_addr = entry->res->start - entry->offset;
u32 flags = LAM_64BIT | LAR_ENABLE;
u64 mask;
- u64 size;
+ u64 size = resource_size(entry->res);
int idx = *index;
if (restype & IORESOURCE_PREFETCH)
flags |= LAM_PREFETCH;
- /*
- * If the size of the range is larger than the alignment of the start
- * address, we have to use multiple entries to perform the mapping.
- */
- if (cpu_addr > 0) {
- unsigned long nr_zeros = __ffs64(cpu_addr);
- u64 alignment = 1ULL << nr_zeros;
+ while (cpu_addr < cpu_end) {
+ if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ /*
+ * If the size of the range is larger than the alignment of
+ * the start address, we have to use multiple entries to
+ * perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
- size = min(range->size, alignment);
- } else {
- size = range->size;
- }
- /* Hardware supports max 4GiB inbound region */
- size = min(size, 1ULL << 32);
+ size = min(size, alignment);
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
- mask = roundup_pow_of_two(size) - 1;
- mask &= ~0xf;
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
- while (cpu_addr < cpu_end) {
/*
* Set up 64-bit inbound regions as the range parser doesn't
* distinguish between 32 and 64-bit types.
@@ -1067,41 +1073,25 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
pci_addr += size;
cpu_addr += size;
idx += 2;
-
- if (idx > MAX_NR_INBOUND_MAPS) {
- dev_err(pcie->dev, "Failed to map inbound regions!\n");
- return -EINVAL;
- }
}
*index = idx;
return 0;
}
-static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
- struct device_node *np)
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int index = 0;
- int err;
-
- if (of_pci_dma_range_parser_init(&parser, np))
- return -EINVAL;
-
- /* Get the dma-ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.cpu_addr + range.size - 1;
-
- dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
- range.flags, range.cpu_addr, end, range.pci_addr);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int index = 0, err = 0;
- err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = rcar_pcie_inbound_ranges(pcie, entry, &index);
if (err)
- return err;
+ break;
}
- return 0;
+ return err;
}
static const struct of_device_id rcar_pcie_of_match[] = {
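Each pass of the rewritten loop maps a chunk no larger than both the natural alignment of the current CPU address and the hardware's 4 GiB ceiling; the sizing rule in isolation (rcar_inbound_chunk() is a hypothetical helper, not part of the patch):

/*
 * Hypothetical helper showing the per-window sizing rule used above:
 * a window must be naturally aligned on its CPU address and is capped
 * at 4 GiB by the hardware.
 */
static u64 rcar_inbound_chunk(u64 cpu_addr, u64 remaining)
{
	u64 size = remaining;

	if (cpu_addr > 0)
		size = min(size, 1ULL << __ffs64(cpu_addr));

	return min(size, 1ULL << 32);
}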
@@ -1138,7 +1128,8 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, pcie);
- err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources,
+ &bridge->dma_ranges, NULL);
if (err)
goto err_free_bridge;
@@ -1161,7 +1152,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
goto err_unmap_msi_irqs;
}
- err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
+ err = rcar_pcie_parse_map_dma_ranges(pcie);
if (err)
goto err_clk_disable;
@@ -1237,6 +1228,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
return 0;
/* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
return rcar_pcie_wait_for_dl(pcie);
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index ef8e677ce9d1..d9b63bfa5dd7 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -620,19 +620,13 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
dev_info(dev, "no vpcie3v3 regulator found\n");
}
- rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
- if (IS_ERR(rockchip->vpcie1v8)) {
- if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
- return PTR_ERR(rockchip->vpcie1v8);
- dev_info(dev, "no vpcie1v8 regulator found\n");
- }
+ rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8))
+ return PTR_ERR(rockchip->vpcie1v8);
- rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
- if (IS_ERR(rockchip->vpcie0v9)) {
- if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
- return PTR_ERR(rockchip->vpcie0v9);
- dev_info(dev, "no vpcie0v9 regulator found\n");
- }
+ rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9))
+ return PTR_ERR(rockchip->vpcie0v9);
return 0;
}
@@ -658,27 +652,22 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
}
}
- if (!IS_ERR(rockchip->vpcie1v8)) {
- err = regulator_enable(rockchip->vpcie1v8);
- if (err) {
- dev_err(dev, "fail to enable vpcie1v8 regulator\n");
- goto err_disable_3v3;
- }
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
}
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- goto err_disable_1v8;
- }
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
}
return 0;
err_disable_1v8:
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie1v8);
err_disable_3v3:
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
@@ -806,19 +795,28 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
+ struct resource_entry *entry;
+ u64 pci_addr, size;
int offset;
int err;
int reg_no;
rockchip_pcie_cfg_configuration_accesses(rockchip,
AXI_WRAPPER_TYPE0_CFG);
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+ rockchip->msg_bus_addr = pci_addr;
- for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
AXI_WRAPPER_MEM_WRITE,
20 - 1,
- rockchip->mem_bus_addr +
- (reg_no << 20),
+ pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC mem outbound ATU failed\n");
@@ -832,14 +830,20 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
return err;
}
- offset = rockchip->mem_size >> 20;
- for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (!entry)
+ return -ENODEV;
+
+ /* offset is the number of MEM windows already programmed above */
+ offset = size >> 20;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip,
reg_no + 1 + offset,
AXI_WRAPPER_IO_WRITE,
20 - 1,
- rockchip->io_bus_addr +
- (reg_no << 20),
+ pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC io outbound ATU failed\n");
@@ -852,8 +856,7 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
AXI_WRAPPER_NOR_MSG,
20 - 1, 0, 0);
- rockchip->msg_bus_addr = rockchip->mem_bus_addr +
- ((reg_no + offset) << 20);
+ rockchip->msg_bus_addr += ((reg_no + offset) << 20);
return err;
}
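resource_list_first_type() pulls the first window of a given type straight off bridge->windows, which is what lets the driver-private mem/io copies be dropped from struct rockchip_pcie below. Recovering size and bus address from an entry, as above:

/* Sketch: derive window size and PCI bus address from a bridge window */
entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
if (!entry)
	return -ENODEV;

size = resource_size(entry->res);		/* CPU-side window size */
pci_addr = entry->res->start - entry->offset;	/* bus addr = CPU addr - offset */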
@@ -897,8 +900,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
rockchip_pcie_disable_clocks(rockchip);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie0v9);
return ret;
}
@@ -908,12 +910,10 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- return err;
- }
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ return err;
}
err = rockchip_pcie_enable_clocks(rockchip);
@@ -939,8 +939,7 @@ err_err_deinit_port:
err_pcie_resume:
rockchip_pcie_disable_clocks(rockchip);
err_disable_0v9:
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie0v9);
return err;
}
@@ -950,14 +949,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
- struct resource_entry *win;
- resource_size_t io_base;
- struct resource *mem;
- struct resource *io;
+ struct resource *bus_res;
int err;
- LIST_HEAD(res);
-
if (!dev->of_node)
return -ENODEV;
@@ -995,56 +989,23 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err < 0)
goto err_deinit_port;
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &res, &io_base);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, &bus_res);
if (err)
goto err_remove_irq_domain;
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto err_free_res;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "I/O";
- rockchip->io_size = resource_size(io);
- rockchip->io_bus_addr = io->start - win->offset;
- err = pci_remap_iospace(io, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, io);
- continue;
- }
- rockchip->io = io;
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "MEM";
- rockchip->mem_size = resource_size(mem);
- rockchip->mem_bus_addr = mem->start - win->offset;
- break;
- case IORESOURCE_BUS:
- rockchip->root_bus_nr = win->res->start;
- break;
- default:
- continue;
- }
- }
+ rockchip->root_bus_nr = bus_res->start;
err = rockchip_pcie_cfg_atu(rockchip);
if (err)
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
if (!rockchip->msg_region) {
err = -ENOMEM;
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
}
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = rockchip;
bridge->busnr = 0;
@@ -1054,7 +1015,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
bus = bridge->bus;
@@ -1068,10 +1029,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-err_unmap_iospace:
- pci_unmap_iospace(rockchip->io);
-err_free_res:
- pci_free_resource_list(&res);
err_remove_irq_domain:
irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
@@ -1081,10 +1038,8 @@ err_vpcie:
regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
rockchip_pcie_disable_clocks(rockchip);
return err;
@@ -1097,7 +1052,6 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(rockchip->root_bus);
pci_remove_root_bus(rockchip->root_bus);
- pci_unmap_iospace(rockchip->io);
irq_domain_remove(rockchip->irq_domain);
rockchip_pcie_deinit_phys(rockchip);
@@ -1108,10 +1062,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
return 0;
}
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 8e87a059ce73..d90dfb354573 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Rockchip AXI PCIe controller driver
*
@@ -304,13 +304,8 @@ struct rockchip_pcie {
struct irq_domain *irq_domain;
int offset;
struct pci_bus *root_bus;
- struct resource *io;
- phys_addr_t io_bus_addr;
- u32 io_size;
void __iomem *msg_region;
- u32 mem_size;
phys_addr_t msg_bus_addr;
- phys_addr_t mem_bus_addr;
bool is_rc;
struct resource *mem_res;
};
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 45c0f344ccd1..9bd1427f2fd6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -821,8 +821,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
int err;
- resource_size_t iobase = 0;
- LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@@ -845,24 +843,19 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto error;
-
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ return err;
}
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_busno;
@@ -874,13 +867,13 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- goto error;
+ return err;
}
}
err = pci_scan_root_bus_bridge(bridge);
if (err)
- goto error;
+ return err;
bus = bridge->bus;
@@ -889,10 +882,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return err;
}
static struct platform_driver nwl_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 5bf3af3b28e6..98e55297815b 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -619,8 +619,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int err;
- resource_size_t iobase = 0;
- LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
@@ -647,19 +645,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto error;
-
-
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@@ -673,7 +665,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
#endif
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto error;
+ return err;
bus = bridge->bus;
@@ -682,10 +674,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return err;
}
static const struct of_device_id xilinx_pcie_of_match[] = {
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a35d3f3996d7..212842263f55 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -602,16 +602,30 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
/*
* Certain VMD devices may have a root port configuration option which
- * limits the bus range to between 0-127 or 128-255
+ * limits the bus range to between 0-127, 128-255, or 224-255
*/
if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
- u32 vmcap, vmconfig;
-
- pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
- pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
- if (BUS_RESTRICT_CAP(vmcap) &&
- (BUS_RESTRICT_CFG(vmconfig) == 0x1))
- vmd->busn_start = 128;
+ u16 reg16;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCAP, &reg16);
+ if (BUS_RESTRICT_CAP(reg16)) {
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG,
+ &reg16);
+
+ switch (BUS_RESTRICT_CFG(reg16)) {
+ case 1:
+ vmd->busn_start = 128;
+ break;
+ case 2:
+ vmd->busn_start = 224;
+ break;
+ case 3:
+ pci_err(vmd->dev, "Unknown Bus Offset Setting\n");
+ return -ENODEV;
+ default:
+ break;
+ }
+ }
}
res = &vmd->dev->resource[VMD_CFGBAR];
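The two-bit BUS_RESTRICT_CFG field now selects the first bus number the VMD domain may use, with encoding 3 treated as invalid; the decode as a hypothetical standalone helper:

/* Hypothetical helper mirroring the switch above */
static int vmd_decode_busn_start(u16 vmconfig)
{
	switch (BUS_RESTRICT_CFG(vmconfig)) {
	case 1:
		return 128;		/* buses 128-255 */
	case 2:
		return 224;		/* buses 224-255 */
	case 3:
		return -ENODEV;		/* unknown/reserved encoding */
	default:
		return 0;		/* no offset: buses 0-127 */
	}
}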
@@ -823,7 +837,7 @@ static int vmd_suspend(struct device *dev)
int i;
for (i = 0; i < vmd->msix_count; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+ devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
pci_save_state(pdev);
return 0;
@@ -854,6 +868,8 @@ static const struct pci_device_id vmd_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 1cfe3687a211..5d74f81ddfe4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,7 +44,7 @@
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
- void *reg[6];
+ void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
struct delayed_work cmd_handler;
@@ -377,7 +377,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
cancel_delayed_work(&epf_test->cmd_handler);
pci_epc_stop(epc);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
@@ -400,7 +400,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- for (bar = BAR_0; bar <= BAR_5; bar += add) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
/*
* pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
@@ -450,7 +450,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = BAR_0; bar <= BAR_5; bar += add) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
@@ -478,7 +478,7 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
bool bar_fixed_64bit;
int i;
- for (i = BAR_0; i <= BAR_5; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
if (bar_fixed_64bit)
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 2bf8bd1f0563..d2b174ce15de 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -134,7 +134,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
if (pageno < 0)
return NULL;
- *phys_addr = mem->phys_base + (pageno << page_shift);
+ *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
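The added cast matters on 32-bit kernels with a 64-bit phys_addr_t: pageno is an int, so pageno << page_shift is evaluated in 32 bits and can wrap before the addition. An illustration with hypothetical values:

/* With a 32-bit int, 0x100000 << 12 == 2^32 overflows before widening */
int pageno = 0x100000;				/* page index */
unsigned int page_shift = 12;			/* 4 KiB pages */
phys_addr_t base = 0x80000000ULL;

phys_addr_t wrong = base + (pageno << page_shift);		/* wraps/UB */
phys_addr_t right = base + ((phys_addr_t)pageno << page_shift);	/* 0x180000000 */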
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index e7b493c22bf3..32455a79372d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -83,7 +83,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
depends on HOTPLUG_PCI_CPCI && X86
help
Say Y here if you have a Performance Technologies (formerly Intel,
- formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
+ formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
module will be called cpcihp_zt5550.
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e4c46637f32f..b3869951c0eb 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -449,8 +449,15 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
/* Scan non-hotplug bridges that need to be reconfigured */
for_each_pci_bridge(dev, bus) {
- if (!hotplug_is_native(dev))
- max = pci_scan_bridge(bus, dev, max, 1);
+ if (hotplug_is_native(dev))
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, 1);
+ if (dev->subordinate) {
+ pcibios_resource_survey_bus(dev->subordinate);
+ pci_bus_size_bridges(dev->subordinate);
+ pci_bus_assign_resources(dev->subordinate);
+ }
}
}
@@ -480,7 +487,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (PCI_SLOT(dev->devfn) == slot->device)
acpiphp_native_scan_bridge(dev);
}
- pci_assign_unassigned_bridge_resources(bus->self);
} else {
LIST_HEAD(add_list);
int max, pass;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 654c972b8ea0..aa61d4c219d7 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,6 +72,7 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
@@ -101,6 +102,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
};
@@ -172,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn);
void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
int pciehp_query_power_fault(struct controller *ctrl);
-bool pciehp_card_present(struct controller *ctrl);
-bool pciehp_card_present_or_link_active(struct controller *ctrl);
+int pciehp_card_present(struct controller *ctrl);
+int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
-bool pciehp_check_link_active(struct controller *ctrl);
+int pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index b3122c151b80..312cc45c44c7 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl->pcie->port;
+ int ret;
pci_config_pm_runtime_get(pdev);
- *value = pciehp_card_present_or_link_active(ctrl);
+ ret = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
+ if (ret < 0)
+ return ret;
+
+ *value = ret;
return 0;
}
@@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- bool occupied;
+ int occupied;
down_read(&ctrl->reset_lock);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
- if ((occupied && (ctrl->state == OFF_STATE ||
+ if ((occupied > 0 && (ctrl->state == OFF_STATE ||
ctrl->state == BLINKINGON_STATE)) ||
(!occupied && (ctrl->state == ON_STATE ||
ctrl->state == BLINKINGOFF_STATE)))
@@ -253,7 +258,7 @@ static bool pme_is_native(struct pcie_device *dev)
return pcie_ports_native || host->native_pme;
}
-static int pciehp_suspend(struct pcie_device *dev)
+static void pciehp_disable_interrupt(struct pcie_device *dev)
{
/*
* Disable hotplug interrupt so that it does not trigger
@@ -261,7 +266,19 @@ static int pciehp_suspend(struct pcie_device *dev)
*/
if (pme_is_native(dev))
pcie_disable_interrupt(get_service_data(dev));
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pciehp_suspend(struct pcie_device *dev)
+{
+ /*
+ * If the port is already runtime suspended we can keep it that
+ * way.
+ */
+ if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
+ return 0;
+ pciehp_disable_interrupt(dev);
return 0;
}
@@ -279,6 +296,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
return 0;
}
+#endif
static int pciehp_resume(struct pcie_device *dev)
{
@@ -292,6 +310,12 @@ static int pciehp_resume(struct pcie_device *dev)
return 0;
}
+static int pciehp_runtime_suspend(struct pcie_device *dev)
+{
+ pciehp_disable_interrupt(dev);
+ return 0;
+}
+
static int pciehp_runtime_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
@@ -318,10 +342,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.remove = pciehp_remove,
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
- .runtime_suspend = pciehp_suspend,
+#endif
+ .runtime_suspend = pciehp_runtime_suspend,
.runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 21af7b16d7a4..6503d15effbb 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl)
void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- bool present, link_active;
+ int present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
mutex_lock(&ctrl->state_lock);
present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
- if (!present && !link_active) {
+ if (present <= 0 && link_active <= 0) {
mutex_unlock(&ctrl->state_lock);
return;
}
@@ -375,7 +375,8 @@ int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
ctrl->request_result = -ENODEV;
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
@@ -408,7 +409,8 @@ int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1a522c1c4177..8a2cb1764386 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -68,7 +68,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- while (true) {
+ do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n",
@@ -81,11 +81,9 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
PCI_EXP_SLTSTA_CC);
return 1;
}
- if (timeout < 0)
- break;
msleep(10);
timeout -= 10;
- }
+ } while (timeout >= 0);
return 0; /* timeout */
}
@@ -201,17 +199,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
pcie_do_write_cmd(ctrl, cmd, mask, false);
}
-bool pciehp_check_link_active(struct controller *ctrl)
+/**
+ * pciehp_check_link_active() - Is the link active
+ * @ctrl: PCIe hotplug controller
+ *
+ * Check whether the downstream link is currently active. Note it is
+ * possible that the card is removed immediately after this so the
+ * caller may need to take it into account.
+ *
+ * If the hotplug controller itself is not available anymore, returns
+ * %-ENODEV.
+ */
+int pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
- bool ret;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+ return -ENODEV;
- if (ret)
- ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
return ret;
}
@@ -373,13 +383,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-bool pciehp_card_present(struct controller *ctrl)
+/**
+ * pciehp_card_present() - Is the card present
+ * @ctrl: PCIe hotplug controller
+ *
+ * Function checks whether the card is currently present in the slot and
+ * in that case returns %1. Note it is possible that the card is
+ * removed immediately after the check so the caller may need to take
+ * this into account.
+ *
+ * If the hotplug controller itself is not available anymore, returns
+ * %-ENODEV.
+ */
+int pciehp_card_present(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- return slot_status & PCI_EXP_SLTSTA_PDS;
+ ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+ return -ENODEV;
+
+ return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
/**
@@ -390,10 +416,19 @@ bool pciehp_card_present(struct controller *ctrl)
* Presence Detect State bit, this helper also returns true if the Link Active
* bit is set. This is a concession to broken hotplug ports which hardwire
* Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ *
+ * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
+ * port is not present anymore, returns %-ENODEV.
*/
-bool pciehp_card_present_or_link_active(struct controller *ctrl)
+int pciehp_card_present_or_link_active(struct controller *ctrl)
{
- return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+ int ret;
+
+ ret = pciehp_card_present(ctrl);
+ if (ret)
+ return ret;
+
+ return pciehp_check_link_active(ctrl);
}
int pciehp_query_power_fault(struct controller *ctrl)
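Every caller now has to treat the result as a tri-state, as the get_adapter_status() change above does; the pattern in miniature:

/* Sketch: tri-state presence check after the bool -> int conversion */
ret = pciehp_card_present_or_link_active(ctrl);
if (ret < 0)
	return ret;	/* -ENODEV: the hotplug controller itself is gone */

*value = ret;		/* 1 = slot occupied, 0 = slot empty */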
@@ -583,6 +618,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
irqreturn_t ret;
u32 events;
+ ctrl->ist_running = true;
pci_config_pm_runtime_get(pdev);
/* rerun pciehp_isr() if the port was inaccessible on interrupt */
@@ -629,6 +665,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
up_read(&ctrl->reset_lock);
pci_config_pm_runtime_put(pdev);
+ ctrl->ist_running = false;
wake_up(&ctrl->requester);
return IRQ_HANDLED;
}
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 951f7f216fb3..e408e4021cee 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -185,8 +185,8 @@ static int get_children_props(struct device_node *dn, const __be32 **drc_indexes
/* Verify the existence of 'drc_name' and/or 'drc_type' within the
- * current node. First obtain it's my-drc-index property. Next,
- * obtain the DRC info from it's parent. Use the my-drc-index for
+ * current node. First obtain its my-drc-index property. Next,
+ * obtain the DRC info from its parent. Use the my-drc-index for
* correlation, and obtain/validate the requested properties.
*/
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b3f972e8cfed..1e88fd427757 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
@@ -254,8 +253,14 @@ static ssize_t sriov_numvfs_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ u16 num_vfs;
+
+ /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
+ device_lock(&pdev->dev);
+ num_vfs = pdev->sriov->num_VFs;
+ device_unlock(&pdev->dev);
- return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+ return sprintf(buf, "%u\n", num_vfs);
}
/*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0884bedcfc7a..c7709e49f0e4 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -213,12 +213,13 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
if (pci_msi_ignore_mask)
return 0;
+
desc_addr = pci_msix_desc_addr(desc);
if (!desc_addr)
return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- if (flag)
+ if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
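On restore the flag argument carries a saved vector-control word rather than a plain boolean, so only its mask bit may be honored; the masking above is equivalent to:

/* Equivalent formulation: clear the mask bit, then copy it from flag */
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
mask_bits |= flag & PCI_MSIX_ENTRY_CTRL_MASKBIT;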
@@ -861,7 +862,7 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec)
if (!pci_msi_enable)
return 0;
- if (!dev || dev->no_msi || dev->current_state != PCI_D0)
+ if (!dev || dev->no_msi)
return 0;
/*
@@ -972,7 +973,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
int nr_entries;
int i, j;
- if (!pci_msi_supported(dev, nvec))
+ if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
return -EINVAL;
nr_entries = pci_msix_vec_count(dev);
@@ -1058,7 +1059,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
int nvec;
int rc;
- if (!pci_msi_supported(dev, minvec))
+ if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
return -EINVAL;
/* Check whether driver already requested MSI-X IRQs */
@@ -1315,22 +1316,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
}
EXPORT_SYMBOL(pci_irq_get_affinity);
-/**
- * pci_irq_get_node - return the NUMA node of a particular MSI vector
- * @pdev: PCI device to operate on
- * @vec: device-relative interrupt vector index (0-based).
- */
-int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
- const struct cpumask *mask;
-
- mask = pci_irq_get_affinity(pdev, vec);
- if (mask)
- return local_memory_node(cpu_to_node(cpumask_first(mask)));
- return dev_to_node(&pdev->dev);
-}
-EXPORT_SYMBOL(pci_irq_get_node);
-
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
return to_pci_dev(desc->dev);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 36891e7deee3..81ceeaa6f1d5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -236,7 +236,6 @@ void of_pci_check_probe_only(void)
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
-#if defined(CONFIG_OF_ADDRESS)
/**
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
@@ -255,16 +254,18 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
-int devm_of_pci_get_host_bridge_resources(struct device *dev,
+static int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
+ struct list_head *resources,
+ struct list_head *ib_resources,
+ resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
- char range_type[4];
+ const char *range_type;
int err;
if (io_base)
@@ -298,12 +299,12 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
- snprintf(range_type, 4, " IO");
+ range_type = "IO";
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
- snprintf(range_type, 4, "MEM");
+ range_type = "MEM";
else
- snprintf(range_type, 4, "err");
- dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
+ range_type = "err";
+ dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
range_type, range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
@@ -340,14 +341,54 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
}
+ /* Check for dma-ranges property */
+ if (!ib_resources)
+ return 0;
+ err = of_pci_dma_range_parser_init(&parser, dev_node);
+ if (err)
+ return 0;
+
+ dev_dbg(dev, "Parsing dma-ranges property...\n");
+ for_each_of_pci_range(&parser, &range) {
+ struct resource_entry *entry;
+ /*
+ * If we failed translation or got a zero-sized region
+ * then skip this range
+ */
+ if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
+ range.cpu_addr == OF_BAD_ADDR || range.size == 0)
+ continue;
+
+ dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
+ "IB MEM", range.cpu_addr,
+ range.cpu_addr + range.size - 1, range.pci_addr);
+
+ err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
+ if (err)
+ continue;
+
+ res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Keep the resource list sorted */
+ resource_list_for_each_entry(entry, ib_resources)
+ if (entry->res->start > res->start)
+ break;
+
+ pci_add_resource_offset(&entry->node, res,
+ res->start - range.pci_addr);
+ }
+
return 0;
failed:
pci_free_resource_list(resources);
return err;
}
-EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
-#endif /* CONFIG_OF_ADDRESS */
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
@@ -482,6 +523,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
+ struct list_head *ib_resources,
struct resource **bus_range)
{
int err, res_valid = 0;
@@ -489,8 +531,10 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
+ if (ib_resources)
+ INIT_LIST_HEAD(ib_resources);
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
- &iobase);
+ ib_resources, &iobase);
if (err)
return err;
@@ -530,6 +574,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
pci_free_resource_list(resources);
return err;
}
+EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges);
#endif /* CONFIG_PCI */
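The sorted insertion in the dma-ranges loop above leans on a list property: pci_add_resource_offset() does a list_add_tail() on the head it is given, and list_add_tail() on an existing entry's &entry->node links the new node immediately before that entry. Breaking at the first entry with a larger start therefore keeps ib_resources ordered; in isolation:

/* Sketch: keep a resource list sorted by start address on insert */
resource_list_for_each_entry(entry, ib_resources)
	if (entry->res->start > res->start)
		break;			/* new node goes in front of this one */

pci_add_resource_offset(&entry->node, res, res->start - range.pci_addr);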
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 5fd90105510d..fffa77093c08 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -270,10 +270,10 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
unsigned int flags)
{
- bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
- bridge->conf.status = PCI_STATUS_CAP_LIST;
+ bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
sizeof(pci_regs_behavior),
GFP_KERNEL);
@@ -284,8 +284,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
/* Set PCIe v2, root port, slot support */
- bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT;
+ bridge->pcie_conf.cap =
+ cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
sizeof(pcie_cap_regs_behavior),
@@ -327,7 +328,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
int reg = where & ~3;
pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
int reg, u32 *value);
- u32 *cfgspace;
+ __le32 *cfgspace;
const struct pci_bridge_reg_behavior *behavior;
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
@@ -343,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
reg -= PCI_CAP_PCIE_START;
read_op = bridge->ops->read_pcie;
- cfgspace = (u32 *) &bridge->pcie_conf;
+ cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
} else {
read_op = bridge->ops->read_base;
- cfgspace = (u32 *) &bridge->conf;
+ cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
}
@@ -357,7 +358,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
- *value = cfgspace[reg / 4];
+ *value = le32_to_cpu(cfgspace[reg / 4]);
/*
* Make sure we never return any reserved bit with a value
@@ -387,7 +388,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
int mask, ret, old, new, shift;
void (*write_op)(struct pci_bridge_emul *bridge, int reg,
u32 old, u32 new, u32 mask);
- u32 *cfgspace;
+ __le32 *cfgspace;
const struct pci_bridge_reg_behavior *behavior;
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
@@ -414,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
reg -= PCI_CAP_PCIE_START;
write_op = bridge->ops->write_pcie;
- cfgspace = (u32 *) &bridge->pcie_conf;
+ cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
} else {
write_op = bridge->ops->write_base;
- cfgspace = (u32 *) &bridge->conf;
+ cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
}
@@ -431,7 +432,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
/* Clear the W1C bits */
new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
- cfgspace[reg / 4] = new;
+ cfgspace[reg / 4] = cpu_to_le32(new);
if (write_op)
write_op(bridge, reg, old, new, mask);
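Storing the emulated config space as __le32/__le16 keeps its memory layout identical to real config space on big-endian hosts, with byte swapping confined to the access paths; the round-trip in brief:

/* Sketch: endian-explicit access to the emulated config space */
__le32 *cfgspace = (__le32 *)&bridge->conf;
u32 value;

value = le32_to_cpu(cfgspace[reg / 4]);	/* read: LE storage -> CPU order */
cfgspace[reg / 4] = cpu_to_le32(new);	/* write: CPU order -> LE storage */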
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index e65b1b79899d..b31883022a8e 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -6,65 +6,65 @@
/* PCI configuration space of a PCI-to-PCI bridge. */
struct pci_bridge_emul_conf {
- u16 vendor;
- u16 device;
- u16 command;
- u16 status;
- u32 class_revision;
+ __le16 vendor;
+ __le16 device;
+ __le16 command;
+ __le16 status;
+ __le32 class_revision;
u8 cache_line_size;
u8 latency_timer;
u8 header_type;
u8 bist;
- u32 bar[2];
+ __le32 bar[2];
u8 primary_bus;
u8 secondary_bus;
u8 subordinate_bus;
u8 secondary_latency_timer;
u8 iobase;
u8 iolimit;
- u16 secondary_status;
- u16 membase;
- u16 memlimit;
- u16 pref_mem_base;
- u16 pref_mem_limit;
- u32 prefbaseupper;
- u32 preflimitupper;
- u16 iobaseupper;
- u16 iolimitupper;
+ __le16 secondary_status;
+ __le16 membase;
+ __le16 memlimit;
+ __le16 pref_mem_base;
+ __le16 pref_mem_limit;
+ __le32 prefbaseupper;
+ __le32 preflimitupper;
+ __le16 iobaseupper;
+ __le16 iolimitupper;
u8 capabilities_pointer;
u8 reserve[3];
- u32 romaddr;
+ __le32 romaddr;
u8 intline;
u8 intpin;
- u16 bridgectrl;
+ __le16 bridgectrl;
};
/* PCI configuration space of the PCIe capabilities */
struct pci_bridge_emul_pcie_conf {
u8 cap_id;
u8 next;
- u16 cap;
- u32 devcap;
- u16 devctl;
- u16 devsta;
- u32 lnkcap;
- u16 lnkctl;
- u16 lnksta;
- u32 slotcap;
- u16 slotctl;
- u16 slotsta;
- u16 rootctl;
- u16 rsvd;
- u32 rootsta;
- u32 devcap2;
- u16 devctl2;
- u16 devsta2;
- u32 lnkcap2;
- u16 lnkctl2;
- u16 lnksta2;
- u32 slotcap2;
- u16 slotctl2;
- u16 slotsta2;
+ __le16 cap;
+ __le32 devcap;
+ __le16 devctl;
+ __le16 devsta;
+ __le32 lnkcap;
+ __le16 lnkctl;
+ __le16 lnksta;
+ __le32 slotcap;
+ __le16 slotctl;
+ __le16 slotsta;
+ __le16 rootctl;
+ __le16 rsvd;
+ __le32 rootsta;
+ __le32 devcap2;
+ __le16 devctl2;
+ __le16 devsta2;
+ __le32 lnkcap2;
+ __le16 lnkctl2;
+ __le16 lnksta2;
+ __le32 slotcap2;
+ __le16 slotctl2;
+ __le16 slotsta2;
};
struct pci_bridge_emul;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a8124e47bf6e..0454ca0e4e3f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -315,7 +315,8 @@ static long local_pci_probe(void *_ddi)
* Probe function should return < 0 for failure, 0 for success
* Treat values > 0 as success, but warn.
*/
- dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+ pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
+ rc);
return 0;
}
@@ -517,6 +518,12 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return 0;
}
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+ pci_enable_wake(pci_dev, PCI_D0, false);
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
@@ -524,6 +531,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
pci_pme_restore(pci_dev);
}
@@ -578,9 +586,9 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pS\n",
- drv->suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pS\n",
+ drv->suspend);
}
}
@@ -592,46 +600,17 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
-
- if (drv && drv->suspend_late) {
- pci_power_t prev = pci_dev->current_state;
- int error;
-
- error = drv->suspend_late(pci_dev, state);
- suspend_report_result(drv->suspend_late, error);
- if (error)
- return error;
-
- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
- && pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pS\n",
- drv->suspend_late);
- goto Fixup;
- }
- }
if (!pci_dev->state_saved)
pci_save_state(pci_dev);
pci_pm_set_unknown_state(pci_dev);
-Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
return 0;
}
-static int pci_legacy_resume_early(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
-
- return drv && drv->resume_early ?
- drv->resume_early(pci_dev) : 0;
-}
-
static int pci_legacy_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -645,12 +624,6 @@ static int pci_legacy_resume(struct device *dev)
/* Auxiliary functions used by the new power management framework */
-static void pci_pm_default_resume(struct pci_dev *pci_dev)
-{
- pci_fixup_device(pci_fixup_resume, pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, false);
-}
-
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
/* Disable non-bridge devices without PM support */
@@ -661,16 +634,15 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
struct pci_driver *drv = pci_dev->driver;
- bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
- || drv->resume_early);
+ bool ret = drv && (drv->suspend || drv->resume);
/*
* Legacy PM support is used by default, so warn if the new framework is
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
- drv->name, pci_dev->vendor, pci_dev->device);
+ pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
+ pci_dev->vendor, pci_dev->device);
return ret;
}
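This check is also why the pci_WARN() above fires when a driver wires up both models: a driver should provide either the legacy .suspend/.resume hooks or dev_pm_ops, never both. A minimal sketch of the preferred dev_pm_ops shape — every foo_* name below is hypothetical:

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware, save context */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* restore context */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* legacy .suspend/.resume deliberately left unset */
	.driver.pm	= &foo_pm_ops,
};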
@@ -679,11 +651,11 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
- struct device_driver *drv = dev->driver;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (drv && drv->pm && drv->pm->prepare) {
- int error = drv->pm->prepare(dev);
+ if (pm && pm->prepare) {
+ int error = pm->prepare(dev);
if (error < 0)
return error;
@@ -793,9 +765,9 @@ static int pci_pm_suspend(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->suspend);
}
}
@@ -841,9 +813,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->suspend_noirq);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->suspend_noirq);
goto Fixup;
}
}
@@ -865,7 +837,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_prepare_to_sleep(pci_dev);
}
- dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+ pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
pci_power_name(pci_dev->current_state));
if (pci_dev->current_state == PCI_D0) {
@@ -880,7 +852,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
- dev_dbg(dev, "PCI PM: Skipped\n");
+ pci_dbg(pci_dev, "PCI PM: Skipped\n");
goto Fixup;
}
@@ -917,8 +889,9 @@ Fixup:
static int pci_pm_resume_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
+ bool skip_bus_pm = pci_dev->skip_bus_pm;
if (dev_pm_may_skip_resume(dev))
return 0;
@@ -937,27 +910,28 @@ static int pci_pm_resume_noirq(struct device *dev)
* configuration here and attempting to put them into D0 again is
* pointless, so avoid doing that.
*/
- if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+ if (!(skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ pcie_pme_root_status_cleanup(pci_dev);
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
+ if (!skip_bus_pm && prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
- pcie_pme_root_status_cleanup(pci_dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return 0;
- if (drv && drv->pm && drv->pm->resume_noirq)
- error = drv->pm->resume_noirq(dev);
+ if (pm && pm->resume_noirq)
+ return pm->resume_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
/*
* This is necessary for the suspend error path in which resume is
@@ -973,12 +947,12 @@ static int pci_pm_resume(struct device *dev)
if (pm) {
if (pm->resume)
- error = pm->resume(dev);
+ return pm->resume(dev);
} else {
pci_pm_reenable_device(pci_dev);
}
- return error;
+ return 0;
}
#else /* !CONFIG_SUSPEND */
@@ -993,7 +967,6 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-
/*
* pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
* a hibernate transition
@@ -1039,16 +1012,16 @@ static int pci_pm_freeze(struct device *dev)
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
- if (drv && drv->pm && drv->pm->freeze_noirq) {
+ if (pm && pm->freeze_noirq) {
int error;
- error = drv->pm->freeze_noirq(dev);
- suspend_report_result(drv->pm->freeze_noirq, error);
+ error = pm->freeze_noirq(dev);
+ suspend_report_result(pm->freeze_noirq, error);
if (error)
return error;
}
@@ -1067,8 +1040,8 @@ static int pci_pm_freeze_noirq(struct device *dev)
static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error;
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
@@ -1076,21 +1049,25 @@ static int pci_pm_thaw_noirq(struct device *dev)
return error;
}
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
-
/*
- * pci_restore_state() requires the device to be in D0 (because of MSI
- * restoration among other things), so force it into D0 in case the
- * driver's "freeze" callbacks put it into a low-power state directly.
+ * The pm->thaw_noirq() callback assumes the device has been
+ * returned to D0 and its config state has been restored.
+ *
+ * In addition, pci_restore_state() restores MSI-X state in MMIO
+ * space, which requires the device to be in D0, so return it to D0
+ * in case the driver's "freeze" callbacks put it into a low-power
+ * state.
*/
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
- if (drv && drv->pm && drv->pm->thaw_noirq)
- error = drv->pm->thaw_noirq(dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return 0;
+
+ if (pm && pm->thaw_noirq)
+ return pm->thaw_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_thaw(struct device *dev)
@@ -1161,24 +1138,24 @@ static int pci_pm_poweroff_late(struct device *dev)
static int pci_pm_poweroff_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+ if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
- if (!drv || !drv->pm) {
+ if (!pm) {
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
return 0;
}
- if (drv->pm->poweroff_noirq) {
+ if (pm->poweroff_noirq) {
int error;
- error = drv->pm->poweroff_noirq(dev);
- suspend_report_result(drv->pm->poweroff_noirq, error);
+ error = pm->poweroff_noirq(dev);
+ suspend_report_result(pm->poweroff_noirq, error);
if (error)
return error;
}
@@ -1204,8 +1181,8 @@ static int pci_pm_poweroff_noirq(struct device *dev)
static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error;
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
@@ -1217,19 +1194,18 @@ static int pci_pm_restore_noirq(struct device *dev)
pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
+ return 0;
- if (drv && drv->pm && drv->pm->restore_noirq)
- error = drv->pm->restore_noirq(dev);
+ if (pm && pm->restore_noirq)
+ return pm->restore_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
/*
* This is necessary for the hibernation error path in which restore is
@@ -1245,12 +1221,12 @@ static int pci_pm_restore(struct device *dev)
if (pm) {
if (pm->restore)
- error = pm->restore(dev);
+ return pm->restore(dev);
} else {
pci_pm_reenable_device(pci_dev);
}
- return error;
+ return 0;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -1295,11 +1271,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
* log level.
*/
if (error == -EBUSY || error == -EAGAIN) {
- dev_dbg(dev, "can't suspend now (%ps returned %d)\n",
+ pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
} else if (error) {
- dev_err(dev, "can't suspend (%ps returned %d)\n",
+ pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
}
@@ -1310,9 +1286,9 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (pm && pm->runtime_suspend
&& !pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->runtime_suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->runtime_suspend);
return 0;
}
@@ -1326,9 +1302,10 @@ static int pci_pm_runtime_suspend(struct device *dev)
static int pci_pm_runtime_resume(struct device *dev)
{
- int rc = 0;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
+ int error = 0;
/*
* Restoring config space is necessary even if the device is not bound
@@ -1341,22 +1318,23 @@ static int pci_pm_runtime_resume(struct device *dev)
return 0;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, false);
- pci_fixup_device(pci_fixup_resume, pci_dev);
+ pci_pm_default_resume(pci_dev);
+
+ if (prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
if (pm && pm->runtime_resume)
- rc = pm->runtime_resume(dev);
+ error = pm->runtime_resume(dev);
pci_dev->runtime_d3cold = false;
- return rc;
+ return error;
}
static int pci_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret = 0;
/*
* If pci_dev->driver is not set (unbound), the device should
@@ -1369,9 +1347,9 @@ static int pci_pm_runtime_idle(struct device *dev)
return -ENOSYS;
if (pm->runtime_idle)
- ret = pm->runtime_idle(dev);
+ return pm->runtime_idle(dev);
- return ret;
+ return 0;
}
static const struct dev_pm_ops pci_dev_pm_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 793412954529..13f766db0684 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1122,7 +1122,7 @@ static void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct bin_attribute *res_attr;
res_attr = pdev->res_attr[i];
@@ -1193,7 +1193,7 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int retval;
/* Expose the PCI resources from this device as files */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
/* skip empty resources */
if (!pci_resource_len(pdev, i))
@@ -1330,7 +1330,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
int retval;
pcie_vpd_create_sysfs_dev_files(dev);
- pcie_aspm_create_sysfs_dev_files(dev);
if (dev->reset_fn) {
retval = device_create_file(&dev->dev, &dev_attr_reset);
@@ -1340,7 +1339,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
return 0;
error:
- pcie_aspm_remove_sysfs_dev_files(dev);
pcie_vpd_remove_sysfs_dev_files(dev);
return retval;
}
@@ -1416,7 +1414,6 @@ err:
static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
pcie_vpd_remove_sysfs_dev_files(dev);
- pcie_aspm_remove_sysfs_dev_files(dev);
if (dev->reset_fn) {
device_remove_file(&dev->dev, &dev_attr_reset);
dev->reset_fn = 0;
@@ -1539,24 +1536,6 @@ const struct attribute_group *pci_dev_groups[] = {
NULL,
};
-static const struct attribute_group pci_bridge_group = {
- .attrs = pci_bridge_attrs,
-};
-
-const struct attribute_group *pci_bridge_groups[] = {
- &pci_bridge_group,
- NULL,
-};
-
-static const struct attribute_group pcie_dev_group = {
- .attrs = pcie_dev_attrs,
-};
-
-const struct attribute_group *pcie_dev_groups[] = {
- &pcie_dev_group,
- NULL,
-};
-
static const struct attribute_group pci_dev_hp_attr_group = {
.attrs = pci_dev_hp_attrs,
.is_visible = pci_dev_hp_attrs_are_visible,
@@ -1588,6 +1567,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
#endif
+#ifdef CONFIG_PCIEASPM
+ &aspm_ctrl_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fcfaadc774ee..e87196cc1a7f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -85,10 +86,17 @@ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
#define DEFAULT_HOTPLUG_IO_SIZE (256)
-#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
-/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+#define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024)
+#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024)
+/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
-unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
+ * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
+ * pci=hpmemsize=nnM overrides both
+ */
+unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
+unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
#define DEFAULT_HOTPLUG_BUS_SIZE 1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
@@ -674,7 +682,7 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
if (r->start && resource_contains(r, res))
@@ -834,14 +842,16 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
/*
- * Validate current state:
- * Can enter D0 from any state, but if we can only go deeper
- * to sleep if we're already in a low power state
+ * Validate transition: We can enter D0 from any state, but if
+ * we're already in a low-power state, we can only go deeper. E.g.,
+ * we can go from D1 to D3, but we can't go directly from D3 to D1;
+ * we'd have to go from D3 to D0, then to D1.
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
- pci_err(dev, "invalid power transition (from state %d to %d)\n",
- dev->current_state, state);
+ pci_err(dev, "invalid power transition (from %s to %s)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
return -EINVAL;
}
@@ -851,6 +861,12 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (pmcsr == (u16) ~0) {
+ pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ return -EIO;
+ }
/*
* If we're (effectively) in D3, force entire word to 0.
@@ -886,13 +902,14 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
- udelay(PCI_PM_D2_DELAY);
+ msleep(PCI_PM_D2_DELAY);
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state)
- pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
- dev->current_state);
+ pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
/*
* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -963,7 +980,7 @@ void pci_refresh_power_state(struct pci_dev *dev)
* @dev: PCI device to handle.
* @state: State to put the device into.
*/
-static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
int error;
@@ -979,6 +996,7 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
return error;
}
+EXPORT_SYMBOL_GPL(pci_platform_power_transition);
/**
* pci_wakeup - Wake up a PCI device
@@ -1002,34 +1020,70 @@ void pci_wakeup_bus(struct pci_bus *bus)
pci_walk_bus(bus, pci_wakeup, NULL);
}
+static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+{
+ int delay = 1;
+ u32 id;
+
+ /*
+ * After reset, the device should not silently discard config
+ * requests, but it may still indicate that it needs more time by
+ * responding to them with CRS completions. The Root Port will
+ * generally synthesize ~0 data to complete the read (except when
+ * CRS SV is enabled and the read was for the Vendor ID; in that
+ * case it synthesizes 0x0001 data).
+ *
+ * Wait for the device to return a non-CRS completion. Read the
+ * Command register instead of Vendor ID so we don't have to
+ * contend with the CRS SV value.
+ */
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ while (id == ~0) {
+ if (delay > timeout) {
+ pci_warn(dev, "not ready %dms after %s; giving up\n",
+ delay - 1, reset_type);
+ return -ENOTTY;
+ }
+
+ if (delay > 1000)
+ pci_info(dev, "not ready %dms after %s; waiting\n",
+ delay - 1, reset_type);
+
+ msleep(delay);
+ delay *= 2;
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ }
+
+ if (delay > 1000)
+ pci_info(dev, "ready %dms after %s\n", delay - 1,
+ reset_type);
+
+ return 0;
+}
+
/**
- * __pci_start_power_transition - Start power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
+ * pci_power_up - Put the given device into D0
+ * @dev: PCI device to power up
*/
-static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_power_up(struct pci_dev *dev)
{
- if (state == PCI_D0) {
- pci_platform_power_transition(dev, PCI_D0);
+ pci_platform_power_transition(dev, PCI_D0);
+
+ /*
+ * Mandatory power management transition delays are handled in
+ * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
+ * corresponding bridge.
+ */
+ if (dev->runtime_d3cold) {
/*
- * Mandatory power management transition delays, see
- * PCI Express Base Specification Revision 2.0 Section
- * 6.6.1: Conventional Reset. Do not delay for
- * devices powered on/off by corresponding bridge,
- * because have already delayed for the bridge.
+ * When powering on a bridge from D3cold, the whole hierarchy
+ * may be powered on into D0uninitialized state; resume them to
+ * give them a chance to suspend again
*/
- if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
- /*
- * When powering on a bridge from D3cold, the
- * whole hierarchy may be powered on into
- * D0uninitialized state, resume them to give
- * them a chance to suspend again
- */
- pci_wakeup_bus(dev->subordinate);
- }
+ pci_wakeup_bus(dev->subordinate);
}
+
+ return pci_raw_set_power_state(dev, PCI_D0);
}
/**
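pci_dev_wait() above doubles its sleep between polls, so a device that recovers quickly costs almost nothing while a slow one still gets the full budget. Because 1 + 2 + ... + 2^n = 2^(n+1) - 1, the "delay - 1" in its messages equals exactly the total time slept. A standalone illustration of the worst case under the 60 s PCIE_RESET_READY_POLL_MS budget:

#include <stdio.h>

int main(void)
{
	int timeout = 60000;		/* PCIE_RESET_READY_POLL_MS */
	int delay = 1, total = 0, sleeps = 0;

	while (delay <= timeout) {	/* kernel: if (delay > timeout) give up */
		total += delay;		/* kernel: msleep(delay) */
		delay *= 2;
		sleeps++;
	}
	printf("%d sleeps, %d ms slept, reported as %d ms\n",
	       sleeps, total, delay - 1);	/* 16, 65535, 65535 */
	return 0;
}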
@@ -1057,27 +1111,6 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
}
/**
- * __pci_complete_power_transition - Complete power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
- *
- * This function should not be called directly by device drivers.
- */
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
-{
- int ret;
-
- if (state <= PCI_D0)
- return -EINVAL;
- ret = pci_platform_power_transition(dev, state);
- /* Power off the bridge may power off the whole hierarchy */
- if (!ret && state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
-
-/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
@@ -1117,7 +1150,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (dev->current_state == state)
return 0;
- __pci_start_power_transition(dev, state);
+ if (state == PCI_D0)
+ return pci_power_up(dev);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1133,23 +1167,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
PCI_D3hot : state);
- if (!__pci_complete_power_transition(dev, state))
- error = 0;
+ if (pci_platform_power_transition(dev, state))
+ return error;
- return error;
-}
-EXPORT_SYMBOL(pci_set_power_state);
+ /* Powering off a bridge may power off the whole hierarchy */
+ if (state == PCI_D3cold)
+ pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
-/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- __pci_start_power_transition(dev, PCI_D0);
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
+ return 0;
}
+EXPORT_SYMBOL(pci_set_power_state);
/**
* pci_choose_state - Choose the power state of a PCI device
@@ -1359,6 +1386,7 @@ int pci_save_state(struct pci_dev *dev)
pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
+ pci_save_aer_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1472,6 +1500,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_dpc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
+ pci_restore_aer_state(dev);
pci_restore_config_space(dev);
@@ -3766,7 +3795,7 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
int i;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
pci_release_region(pdev, i);
}
@@ -3777,7 +3806,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
{
int i;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
if (__pci_request_region(pdev, i, res_name, excl))
goto err_out;
@@ -3825,7 +3854,7 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
void pci_release_regions(struct pci_dev *pdev)
{
- pci_release_selected_regions(pdev, (1 << 6) - 1);
+ pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
}
EXPORT_SYMBOL(pci_release_regions);
@@ -3844,7 +3873,8 @@ EXPORT_SYMBOL(pci_release_regions);
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
- return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
+ return pci_request_selected_regions(pdev,
+ ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions);
@@ -3866,7 +3896,7 @@ EXPORT_SYMBOL(pci_request_regions);
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << 6) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
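Swapping the magic 6 for PCI_STD_NUM_BARS is behaviour-preserving: a type 0 header has six standard BARs, so the request mask is still 0x3f. A trivial check of the arithmetic (illustrative):

#include <stdio.h>

#define PCI_STD_NUM_BARS 6	/* BARs 0-5 of a type 0 header */

int main(void)
{
	/* one request bit per standard BAR */
	printf("%#x\n", (1 << PCI_STD_NUM_BARS) - 1);	/* 0x3f */
	return 0;
}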
@@ -4428,47 +4458,6 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
-static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
-{
- int delay = 1;
- u32 id;
-
- /*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 data to complete the read (except when
- * CRS SV is enabled and the read was for the Vendor ID; in that
- * case it synthesizes 0x0001 data).
- *
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
- */
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- while (id == ~0) {
- if (delay > timeout) {
- pci_warn(dev, "not ready %dms after %s; giving up\n",
- delay - 1, reset_type);
- return -ENOTTY;
- }
-
- if (delay > 1000)
- pci_info(dev, "not ready %dms after %s; waiting\n",
- delay - 1, reset_type);
-
- msleep(delay);
- delay *= 2;
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- }
-
- if (delay > 1000)
- pci_info(dev, "ready %dms after %s\n", delay - 1,
- reset_type);
-
- return 0;
-}
-
/**
* pcie_has_flr - check if a device supports function level resets
* @dev: device to check
@@ -4603,16 +4592,19 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
pci_dev_d3_sleep(dev);
- return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+ return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+ int delay)
{
int timeout = 1000;
bool ret;
@@ -4649,13 +4641,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait until the link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
+/*
+ * Find maximum D3cold delay required by all the devices on the bus. The
+ * spec says 100 ms, but firmware can lower it and we allow drivers to
+ * increase it as well.
+ *
+ * Called with @pci_bus_sem locked for reading.
+ */
+static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+{
+ const struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
+
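The min/max pairing above encodes the rule from the comment: firmware may lower the 100 ms default, drivers may raise it, and a reduction only wins when every device on the bus sits at or below it. A standalone replica with illustrative values:

#include <stdio.h>

static int bus_max_d3cold_delay(const int *d, int n)
{
	int min_delay = 100;	/* spec default, in ms */
	int max_delay = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (d[i] < min_delay)
			min_delay = d[i];
		if (d[i] > max_delay)
			max_delay = d[i];
	}
	/* a firmware reduction wins only if no device needs more */
	return max_delay > min_delay ? max_delay : min_delay;
}

int main(void)
{
	int lowered[] = { 10, 20 };	/* firmware lowered both -> 20 */
	int raised[] = { 120, 150 };	/* a driver raised one -> 150 */

	printf("%d %d\n", bus_max_d3cold_delay(lowered, 2),
	       bus_max_d3cold_delay(raised, 2));
	return 0;
}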
+/**
+ * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+ * @dev: PCI bridge
+ *
+ * Handle necessary delays before access to the devices on the secondary
+ * side of the bridge is permitted after a D3cold to D0 transition.
+ *
+ * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+ * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+ * 4.3.2.
+ */
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_dev *child;
+ int delay;
+
+ if (pci_dev_is_disconnected(dev))
+ return;
+
+ if (!pci_is_bridge(dev) || !dev->bridge_d3)
+ return;
+
+ down_read(&pci_bus_sem);
+
+ /*
+ * We only deal with devices that are present currently on the bus.
+ * For any hot-added devices the access delay is handled in pciehp
+ * board_added(). In case of ACPI hotplug the firmware is expected
+ * to configure the devices before OS is notified.
+ */
+ if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ /* Take d3cold_delay requirements into account */
+ delay = pci_bus_max_d3cold_delay(dev->subordinate);
+ if (!delay) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+ bus_list);
+ up_read(&pci_bus_sem);
+
+ /*
+ * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa before
+ * accessing the device after reset (that is 1000 ms + 100 ms). In
+ * practice this should not be needed because we don't do power
+ * management for them (see pci_bridge_d3_possible()).
+ */
+ if (!pci_is_pcie(dev)) {
+ pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+ msleep(1000 + delay);
+ return;
+ }
+
+ /*
+ * PCIe downstream and root ports that do not support speeds
+ * greater than 5 GT/s need to wait a minimum of 100 ms. For
+ * higher speeds (gen3) we first need to wait for the data link
+ * layer to become active.
+ *
+ * However, 100 ms is the minimum, and the PCIe spec says the
+ * software must allow at least 1 s before it can determine that a
+ * device that does not respond is broken. There is also evidence
+ * that 100 ms is not always enough; for example, a certain Titan
+ * Ridge xHCI controller does not always respond to
+ * configuration requests if we only wait for 100 ms (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+ *
+ * Therefore we wait for 100 ms and check for device presence;
+ * if it is still not present, give it an additional 100 ms.
+ */
+ if (!pcie_downstream_port(dev))
+ return;
+
+ if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+ msleep(delay);
+ } else {
+ pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+ delay);
+ if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ /* Did not train, no need to wait any further */
+ return;
+ }
+ }
+
+ if (!pci_device_is_present(child)) {
+ pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+ msleep(delay);
+ }
+}
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
@@ -6304,8 +6427,13 @@ static int __init pci_setup(char *str)
pcie_ecrc_get_policy(str + 5);
} else if (!strncmp(str, "hpiosize=", 9)) {
pci_hotplug_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "hpmmiosize=", 11)) {
+ pci_hotplug_mmio_size = memparse(str + 11, &str);
+ } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
+ pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
- pci_hotplug_mem_size = memparse(str + 10, &str);
+ pci_hotplug_mmio_size = memparse(str + 10, &str);
+ pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
} else if (!strncmp(str, "hpbussize=", 10)) {
pci_hotplug_bus_size =
simple_strtoul(str + 10, &str, 0);
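The split lets the two hotplug MMIO windows be sized independently while hpmemsize remains a shorthand for both; memparse() accepts the usual K/M/G suffixes. Illustrative boot parameters (the sizes are examples only):

pci=hpmemsize=128M                     # both MMIO windows, 128 MB each
pci=hpmmiosize=8M,hpmmioprefsize=1G    # 8 MB non-prefetchable, 1 GB prefetchable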
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3f6947ee3324..a0a53bd05a0b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -12,6 +12,7 @@ extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
+bool pcie_cap_has_rtctl(const struct pci_dev *dev);
/* Functions internal to the PCI core code */
@@ -85,7 +86,7 @@ struct pci_platform_pm_ops {
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
-void pci_power_up(struct pci_dev *dev);
+int pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
@@ -104,6 +105,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -218,7 +220,8 @@ extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_mmio_size;
+extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
/**
@@ -456,6 +459,22 @@ static inline void pci_ats_init(struct pci_dev *d) { }
static inline void pci_restore_ats_state(struct pci_dev *dev) { }
#endif /* CONFIG_PCI_ATS */
+#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *dev);
+void pci_restore_pri_state(struct pci_dev *pdev);
+#else
+static inline void pci_pri_init(struct pci_dev *dev) { }
+static inline void pci_restore_pri_state(struct pci_dev *pdev) { }
+#endif
+
+#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *dev);
+void pci_restore_pasid_state(struct pci_dev *pdev);
+#else
+static inline void pci_pasid_init(struct pci_dev *dev) { }
+static inline void pci_restore_pasid_state(struct pci_dev *pdev) { }
+#endif
+
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
@@ -541,14 +560,6 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
-#ifdef CONFIG_PCIEASPM_DEBUG
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { }
-static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { }
-#endif
-
#ifdef CONFIG_PCIE_ECRC
void pcie_set_ecrc_checking(struct pci_dev *dev);
void pcie_ecrc_get_policy(char *str);
@@ -630,19 +641,6 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
#endif /* CONFIG_OF */
-#if defined(CONFIG_OF_ADDRESS)
-int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
-#else
-static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
-{
- return -EINVAL;
-}
-#endif
-
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
@@ -667,4 +665,8 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
}
#endif
+#ifdef CONFIG_PCIEASPM
+extern const struct attribute_group aspm_ctrl_attr_group;
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 362eb8cfa53b..6e3c04b46fb1 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -4,7 +4,6 @@
#
config PCIEPORTBUS
bool "PCI Express Port Bus support"
- depends on PCI
help
This enables PCI Express Port Bus support. Users can then enable
support for Native Hot-Plug, Advanced Error Reporting, Power
@@ -63,7 +62,6 @@ config PCIE_ECRC
#
config PCIEASPM
bool "PCI Express ASPM control" if EXPERT
- depends on PCI && PCIEPORTBUS
default y
help
This enables OS control over PCI Express ASPM (Active State
@@ -79,13 +77,6 @@ config PCIEASPM
When in doubt, say Y.
-config PCIEASPM_DEBUG
- bool "Debug PCI Express ASPM"
- depends on PCIEASPM
- help
- This enables PCI Express ASPM debug support. It will add per-device
- interface to control ASPM.
-
choice
prompt "Default ASPM policy"
default PCIEASPM_DEFAULT
@@ -135,7 +126,6 @@ config PCIE_DPC
config PCIE_PTM
bool "PCI Express Precision Time Measurement support"
- depends on PCIEPORTBUS
help
This enables PCI Express Precision Time Measurement (PTM)
support.
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index b45bc47d04fe..1ca86f2e0166 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "AER: " fmt
#define dev_fmt pr_fmt
+#include <linux/bitops.h>
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
@@ -36,7 +37,7 @@
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
unsigned int status;
@@ -201,6 +202,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
/**
* pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ * @str: ECRC policy from kernel command line to use
*/
void pcie_ecrc_get_policy(char *str)
{
@@ -448,12 +450,70 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+void pci_save_aer_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+ int pos;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++);
+ if (pcie_cap_has_rtctl(dev))
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++);
+}
+
+void pci_restore_aer_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+ int pos;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++);
+ if (pcie_cap_has_rtctl(dev))
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++);
+}
+
void pci_aer_init(struct pci_dev *dev)
{
+ int n;
+
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!dev->aer_cap)
+ return;
- if (dev->aer_cap)
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+ /*
+ * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
+ * PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root Ports and Root Complex
+ * Event Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0,
+ * sec 7.8.4).
+ */
+ n = pcie_cap_has_rtctl(dev) ? 5 : 4;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_cleanup_aer_error_status_regs(dev);
}
@@ -560,6 +620,7 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"BlockedTLP", /* Bit Position 23 */
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
+ "PoisonTLPBlocked", /* Bit Position 26 */
};
static const char *aer_agent_string[] = {
@@ -657,7 +718,8 @@ const struct attribute_group aer_stats_attr_group = {
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
- int status, i, max = -1;
+ unsigned long status = info->status & ~info->mask;
+ int i, max = -1;
u64 *counter = NULL;
struct aer_stats *aer_stats = pdev->aer_stats;
@@ -682,10 +744,8 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
break;
}
- status = (info->status & ~info->mask);
- for (i = 0; i < max; i++)
- if (status & (1 << i))
- counter[i]++;
+ for_each_set_bit(i, &status, max)
+ counter[i]++;
}
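for_each_set_bit() visits only the set bit positions below the bound, so the rewrite bumps exactly the counters the old test-every-index loop did. A userspace stand-in for the traversal (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long status = 0x25;	/* bits 0, 2 and 5 set */
	unsigned long work = status;
	int max = 6, i;

	while (work) {
		i = __builtin_ctzl(work);	/* lowest set bit */
		if (i >= max)
			break;
		printf("bit %d\n", i);		/* prints 0, 2, 5 */
		work &= work - 1;		/* clear that bit */
	}
	return 0;
}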
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
@@ -717,14 +777,11 @@ static void __print_tlp_header(struct pci_dev *dev,
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
- int i, status;
+ unsigned long status = info->status & ~info->mask;
const char *errmsg = NULL;
- status = (info->status & ~info->mask);
-
- for (i = 0; i < 32; i++) {
- if (!(status & (1 << i)))
- continue;
+ int i;
+ for_each_set_bit(i, &status, 32) {
if (info->severity == AER_CORRECTABLE)
errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
aer_correctable_error_string[i] : NULL;
@@ -1204,7 +1261,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
/**
* aer_isr - consume errors detected by root port
- * @work: definition of this work item
+ * @irq: IRQ assigned to Root Port
+ * @context: pointer to Root Port data structure
*
* Invoked, as DPC, when root port records new detected error
*/
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 652ef23bba35..0dcd44308228 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -64,6 +64,7 @@ struct pcie_link_state {
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- /* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable)
+ /*
+ * Don't enable Clock PM if the link is not Clock PM capable
+ * or Clock PM is disabled
+ */
+ if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- link->clkpm_capable = (blacklist) ? 0 : capable;
+ link->clkpm_capable = capable;
+ link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -894,6 +899,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
return link;
}
+static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
+{
+ struct pci_dev *child;
+
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
+ sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
+}
+
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
* It is called after the pcie and its children devices are scanned.
@@ -955,6 +968,8 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
+ pcie_aspm_update_sysfs_visibility(pdev);
+
unlock:
mutex_unlock(&aspm_lock);
out:
@@ -1061,19 +1076,26 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link;
+ struct pci_dev *bridge;
if (!pci_is_pcie(pdev))
- return 0;
+ return NULL;
- if (pcie_downstream_port(pdev))
- parent = pdev;
- if (!parent || !parent->link_state)
- return -EINVAL;
+ bridge = pci_upstream_bridge(pdev);
+ if (!bridge || !pci_is_pcie(bridge))
+ return NULL;
+ return bridge->link_state;
+}
+
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+{
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+
+ if (!link)
+ return -EINVAL;
/*
* A driver requested that ASPM be disabled on this device, but
* if we don't have permission to manage ASPM (e.g., on ACPI
@@ -1090,17 +1112,24 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
if (sem)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link = parent->link_state;
if (state & PCIE_LINK_STATE_L0S)
link->aspm_disable |= ASPM_STATE_L0S;
if (state & PCIE_LINK_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1;
+ /* L1 PM substates require L1 */
+ link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1_1)
+ link->aspm_disable |= ASPM_STATE_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+ link->aspm_disable |= ASPM_STATE_L1_2;
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+ link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+ link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
- if (state & PCIE_LINK_STATE_CLKPM) {
- link->clkpm_capable = 0;
- pcie_set_clkpm(link, 0);
- }
+ if (state & PCIE_LINK_STATE_CLKPM)
+ link->clkpm_disable = 1;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
@@ -1172,127 +1201,161 @@ module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
/**
* pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
* @pdev: Target device.
+ *
+ * Relies on the upstream bridge's link_state being valid. The link_state
+ * is deallocated only when the last child of the bridge (i.e., @pdev or a
+ * sibling) is removed, and the caller should be holding a reference to
+ * @pdev, so this should be safe.
*/
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
- struct pci_dev *bridge = pci_upstream_bridge(pdev);
- bool ret;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- if (!bridge)
+ if (!link)
return false;
- mutex_lock(&aspm_lock);
- ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
- mutex_unlock(&aspm_lock);
-
- return ret;
+ return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
-#ifdef CONFIG_PCIEASPM_DEBUG
-static ssize_t link_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t aspm_attr_show_common(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 state)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link_state->aspm_enabled);
+ return sprintf(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}
-static ssize_t link_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
+static ssize_t aspm_attr_store_common(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 state)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pcie_link_state *link, *root = pdev->link_state->root;
- u32 state;
-
- if (aspm_disabled)
- return -EPERM;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ bool state_enable;
- if (kstrtouint(buf, 10, &state))
- return -EINVAL;
- if ((state & ~ASPM_STATE_ALL) != 0)
+ if (strtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- list_for_each_entry(link, &link_list, sibling) {
- if (link->root != root)
- continue;
- pcie_config_aspm_link(link, state);
+
+ if (state_enable) {
+ link->aspm_disable &= ~state;
+ /* need to enable L1 for substates */
+ if (state & ASPM_STATE_L1SS)
+ link->aspm_disable &= ~ASPM_STATE_L1;
+ } else {
+ link->aspm_disable |= state;
}
+
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
- return n;
+
+ return len;
}
-static ssize_t clk_ctl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+#define ASPM_ATTR(_f, _s) \
+static ssize_t _f##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+ \
+static ssize_t _f##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+
+ASPM_ATTR(l0s_aspm, L0S)
+ASPM_ATTR(l1_aspm, L1)
+ASPM_ATTR(l1_1_aspm, L1_1)
+ASPM_ATTR(l1_2_aspm, L1_2)
+ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
+ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
+
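Each ASPM_ATTR() invocation stamps out a show/store pair that DEVICE_ATTR_RW() below ties to the file name; for reference, ASPM_ATTR(l0s_aspm, L0S) expands to roughly:

static ssize_t l0s_aspm_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_L0S); }

static ssize_t l0s_aspm_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_L0S); }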
+static ssize_t clkpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link_state->clkpm_enabled);
+ return sprintf(buf, "%d\n", link->clkpm_enabled);
}
-static ssize_t clk_ctl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
+static ssize_t clkpm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct pci_dev *pdev = to_pci_dev(dev);
- bool state;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ bool state_enable;
- if (strtobool(buf, &state))
+ if (strtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clkpm_nocheck(pdev->link_state, state);
+
+ link->clkpm_disable = !state_enable;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
- return n;
+ return len;
}
-static DEVICE_ATTR_RW(link_state);
-static DEVICE_ATTR_RW(clk_ctl);
+static DEVICE_ATTR_RW(clkpm);
+static DEVICE_ATTR_RW(l0s_aspm);
+static DEVICE_ATTR_RW(l1_aspm);
+static DEVICE_ATTR_RW(l1_1_aspm);
+static DEVICE_ATTR_RW(l1_2_aspm);
+static DEVICE_ATTR_RW(l1_1_pcipm);
+static DEVICE_ATTR_RW(l1_2_pcipm);
+
+static struct attribute *aspm_ctrl_attrs[] = {
+ &dev_attr_clkpm.attr,
+ &dev_attr_l0s_aspm.attr,
+ &dev_attr_l1_aspm.attr,
+ &dev_attr_l1_1_aspm.attr,
+ &dev_attr_l1_2_aspm.attr,
+ &dev_attr_l1_1_pcipm.attr,
+ &dev_attr_l1_2_pcipm.attr,
+ NULL
+};
-static char power_group[] = "power";
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
+static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!link_state)
- return;
-
- if (link_state->aspm_support)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clkpm_capable)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ static const u8 aspm_state_map[] = {
+ ASPM_STATE_L0S,
+ ASPM_STATE_L1,
+ ASPM_STATE_L1_1,
+ ASPM_STATE_L1_2,
+ ASPM_STATE_L1_1_PCIPM,
+ ASPM_STATE_L1_2_PCIPM,
+ };
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
+ if (aspm_disabled || !link)
+ return 0;
- if (!link_state)
- return;
+ if (n == 0)
+ return link->clkpm_capable ? a->mode : 0;
- if (link_state->aspm_support)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clkpm_capable)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
+ return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}
-#endif
+
+const struct attribute_group aspm_ctrl_attr_group = {
+ .name = "link",
+ .attrs = aspm_ctrl_attrs,
+ .is_visible = aspm_ctrl_attrs_are_visible,
+};
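Since the group is named "link", the controls appear per device under sysfs, and is_visible() hides states the link does not support. A hypothetical session (the device address is an example only):

cat /sys/bus/pci/devices/0000:01:00.0/link/l1_aspm        # 1 = L1 enabled
echo 0 > /sys/bus/pci/devices/0000:01:00.0/link/l1_aspm   # disable L1
echo 1 > /sys/bus/pci/devices/0000:01:00.0/link/clkpm     # enable Clock PM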
static int __init pcie_aspm_disable(char *str)
{
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a32ec3487a8d..e06f42f58d3d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -291,7 +291,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- if (pcie_aer_get_firmware_first(pdev))
+ if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 944827a8c7d3..1e673619b101 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,6 +25,8 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 5
+extern bool pcie_ports_dpc_native;
+
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
#else
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..5075cb9e850c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -250,8 +250,13 @@ static int get_port_device_capability(struct pci_dev *dev)
pcie_pme_interrupt_enable(dev, false);
}
+ /*
+ * With dpc-native, allow Linux to use DPC even if it doesn't have
+ * permission to use AER.
+ */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
- pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
+ pci_aer_available() &&
+ (pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 0a87091a0800..160d67c59310 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -29,12 +29,20 @@ bool pcie_ports_disabled;
*/
bool pcie_ports_native;
+/*
+ * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe
+ * service even if the platform hasn't given us permission.
+ */
+bool pcie_ports_dpc_native;
+
static int __init pcie_port_setup(char *str)
{
if (!strncmp(str, "compat", 6))
pcie_ports_disabled = true;
else if (!strncmp(str, "native", 6))
pcie_ports_native = true;
+ else if (!strncmp(str, "dpc-native", 10))
+ pcie_ports_dpc_native = true;
return 1;
}
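Together with the portdrv_core.c and dpc.c changes above, the parameter now has three forms, of which this is the new one (usage illustrative):

pcie_ports=dpc-native    # Linux handles DPC even without AER ownership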
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 98cfa30f3fae..9361f3aa26ab 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev)
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
break;
default:
- snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
dev->ptm_granularity);
break;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3d5271a7a849..512cb4312ddd 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/msi.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
@@ -572,6 +573,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
bridge->release_fn(bridge);
pci_free_resource_list(&bridge->windows);
+ pci_free_resource_list(&bridge->dma_ranges);
}
static void pci_release_host_bridge_dev(struct device *dev)
@@ -897,6 +899,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
else
pr_info("PCI host bridge to bus %s\n", name);
+ if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
+ dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+
/* Add initial resources to the bus */
resource_list_for_each_entry_safe(window, n, &resources) {
list_move_tail(&window->node, &bridge->windows);
@@ -1089,14 +1094,15 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* @sec: updated with secondary bus number from EA
* @sub: updated with subordinate bus number from EA
*
- * If @dev is a bridge with EA capability, update @sec and @sub with
- * fixed bus numbers from the capability and return true. Otherwise,
- * return false.
+ * If @dev is a bridge with EA capability that specifies valid secondary
+ * and subordinate bus numbers, return true with the bus numbers in @sec
+ * and @sub. Otherwise return false.
*/
static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{
int ea, offset;
u32 dw;
+ u8 ea_sec, ea_sub;
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
return false;
@@ -1108,8 +1114,13 @@ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
offset = ea + PCI_EA_FIRST_ENT;
pci_read_config_dword(dev, offset, &dw);
- *sec = dw & PCI_EA_SEC_BUS_MASK;
- *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ ea_sec = dw & PCI_EA_SEC_BUS_MASK;
+ ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ if (ea_sec == 0 || ea_sub < ea_sec)
+ return false;
+
+ *sec = ea_sec;
+ *sub = ea_sub;
return true;
}
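
A standalone sketch of the added sanity check on EA-provided bus numbers: only a nonzero secondary with a subordinate at or above it is trusted. The mask and shift constants below are written out as illustrative stand-ins for PCI_EA_SEC_BUS_MASK and friends.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EA_SEC_MASK  0x000000ffu
#define EA_SUB_MASK  0x0000ff00u
#define EA_SUB_SHIFT 8

static bool ea_fixed_busnrs(uint32_t dw, uint8_t *sec, uint8_t *sub)
{
	uint8_t ea_sec = dw & EA_SEC_MASK;
	uint8_t ea_sub = (dw & EA_SUB_MASK) >> EA_SUB_SHIFT;

	if (ea_sec == 0 || ea_sub < ea_sec)
		return false;	/* reject zero or inverted ranges */

	*sec = ea_sec;
	*sub = ea_sub;
	return true;
}

int main(void)
{
	uint8_t sec, sub;

	printf("%d\n", ea_fixed_busnrs(0x0302, &sec, &sub)); /* 1: buses 2..3 */
	printf("%d\n", ea_fixed_busnrs(0x0003, &sec, &sub)); /* 0: sub < sec */
	printf("%d\n", ea_fixed_busnrs(0x0200, &sec, &sub)); /* 0: sec == 0 */
	return 0;
}
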
@@ -2300,8 +2311,7 @@ void pcie_report_downtraining(struct pci_dev *dev)
static void pci_init_capabilities(struct pci_dev *dev)
{
- /* Enhanced Allocation */
- pci_ea_init(dev);
+ pci_ea_init(dev); /* Enhanced Allocation */
/* Setup MSI caps & disable MSI/MSI-X interrupts */
pci_msi_setup_pci_dev(dev);
@@ -2309,29 +2319,16 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
- /* Power Management */
- pci_pm_init(dev);
-
- /* Vital Product Data */
- pci_vpd_init(dev);
-
- /* Alternative Routing-ID Forwarding */
- pci_configure_ari(dev);
-
- /* Single Root I/O Virtualization */
- pci_iov_init(dev);
-
- /* Address Translation Services */
- pci_ats_init(dev);
-
- /* Enable ACS P2P upstream forwarding */
- pci_enable_acs(dev);
-
- /* Precision Time Measurement */
- pci_ptm_init(dev);
-
- /* Advanced Error Reporting */
- pci_aer_init(dev);
+ pci_pm_init(dev); /* Power Management */
+ pci_vpd_init(dev); /* Vital Product Data */
+ pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
+ pci_iov_init(dev); /* Single Root I/O Virtualization */
+ pci_ats_init(dev); /* Address Translation Services */
+ pci_pri_init(dev); /* Page Request Interface */
+ pci_pasid_init(dev); /* Process Address Space ID */
+ pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */
+ pci_ptm_init(dev); /* Precision Time Measurement */
+ pci_aer_init(dev); /* Advanced Error Reporting */
pcie_report_downtraining(dev);
@@ -2403,13 +2400,10 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
- /* Moved out from quirk header fixup code */
pci_reassigndev_resource_alignment(dev);
- /* Clear the state_saved flag */
dev->state_saved = false;
- /* Initialize various capabilities */
pci_init_capabilities(dev);
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 5495537c60c2..6ef74bf5013f 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -258,13 +258,13 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Make sure the caller is mapping a real resource for this device */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (dev->resource[i].flags & res_bit &&
pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
- if (i >= PCI_ROM_RESOURCE)
+ if (i >= PCI_STD_NUM_BARS)
return -ENODEV;
if (fpriv->mmap_state == pci_mmap_mem &&
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 320255e5e8f8..4937a088d7d8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -474,7 +474,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1809,7 +1809,7 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
* The next five BARs all seem to be rubbish, so just clean
* them out.
*/
- for (i = 1; i < 6; i++)
+ for (i = 1; i < PCI_STD_NUM_BARS; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
@@ -4033,7 +4033,6 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
if (id)
pci_add_dma_alias(dev, id->driver_data);
}
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
/*
@@ -4081,6 +4080,40 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
+ * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices
+ * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx).
+ *
+ * Similarly to MIC x200, we need to add DMA aliases to allow buffer access
+ * when IOMMU is enabled. These aliases allow computational unit access to
+ * host memory. These aliases mark the whole VCA device as one IOMMU
+ * group.
+ *
+ * All possible slot numbers (0x20) are used, since we are unable to tell
+ * what slot is used on the other side. This quirk is intended for both host
+ * and computational unit sides. The VCA devices have up to five functions
+ * (four for DMA channels and one additional).
+ */
+static void quirk_pex_vca_alias(struct pci_dev *pdev)
+{
+ const unsigned int num_pci_slots = 0x20;
+ unsigned int slot;
+
+ for (slot = 0; slot < num_pci_slots; slot++) {
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
+
+/*
* The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
* associated not at the root bus, but at a bridge below. This quirk avoids
* generating invalid DMA aliases.
@@ -4263,6 +4296,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -4305,7 +4356,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4313,33 +4364,38 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ switch (dev->device) {
/*
- * Effectively selects all downstream ports for whole ThunderX 1
- * family by 0xf800 mask (which represents 8 SoCs), while the lower
- * bits of device ID are used to indicate which subdevice is used
- * within the SoC.
+ * Effectively selects all downstream ports for whole ThunderX1
+ * (which represents 8 SoCs).
*/
- return (pci_is_pcie(dev) &&
- (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
- ((dev->device & 0xf800) == 0xa000));
+ case 0xa000 ... 0xa7ff: /* ThunderX1 */
+ case 0xaf84: /* ThunderX2 */
+ case 0xb884: /* ThunderX3 */
+ return true;
+ default:
+ return false;
+ }
}
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4349,13 +4405,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4403,37 +4458,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
-
- pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4534,7 +4584,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4548,10 +4598,9 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4562,9 +4611,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
* Allow each Root Port to be in a separate IOMMU group by masking
* SV/RR/CR/UF bits.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4666,6 +4714,17 @@ static const struct pci_dev_acs_enabled {
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
@@ -4706,7 +4765,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
#define INTEL_BSPR_REG_BPPD (1 << 9)
/* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f
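
The refactor above funnels every per-vendor quirk through one subset test: the request succeeds only when each desired ACS control bit is also enabled. A standalone sketch of that test (ACS flag values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ACS_SV (1 << 0)	/* Source Validation */
#define ACS_RR (1 << 2)	/* Request Redirect */
#define ACS_CR (1 << 3)	/* Completion Redirect */
#define ACS_UF (1 << 4)	/* Upstream Forwarding */

static int acs_ctrl_enabled(uint16_t req, uint16_t ena)
{
	return (req & ena) == req;	/* req must be a subset of ena */
}

int main(void)
{
	uint16_t ena = ACS_SV | ACS_RR | ACS_CR | ACS_UF;

	printf("%d\n", acs_ctrl_enabled(ACS_SV | ACS_RR, ena));   /* 1 */
	printf("%d\n", acs_ctrl_enabled(ACS_SV | (1 << 6), ena)); /* 0 */
	return 0;
}
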
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e7dbe21705ba..f279826204eb 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
/*
- * Helper function for sizing routines: find first available bus resource
- * of a given type. Note: we intentionally skip the bus resources which
- * have already been assigned (that is, have non-NULL parent resource).
+ * Helper function for sizing routines. Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return first unassigned resource of the correct type. If there is none,
+ * return first assigned resource of the correct type. If none of the
+ * above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish between a resource that is already assigned and the absence
+ * of any resource of the correct type.
*/
-static struct resource *find_free_bus_resource(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
{
+ struct resource *r, *r_assigned = NULL;
int i;
- struct resource *r;
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
continue;
if (r && (r->flags & type_mask) == type && !r->parent)
return r;
+ if (r && (r->flags & type_mask) == type && !r_assigned)
+ r_assigned = r;
}
- return NULL;
+ return r_assigned;
}
static resource_size_t calculate_iosize(resource_size_t size,
@@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (!b_res)
return;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return;
+
min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
resource_size_t min_align, align, size, size0, size1;
resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
int order, max_order;
- struct resource *b_res = find_free_bus_resource(bus,
+ struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
@@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (!b_res)
return -ENOSPC;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return 0;
+
memset(aligns, 0, sizeof(aligns));
max_order = 0;
size = 0;
@@ -1178,7 +1194,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
unsigned long mask, prefmask, type2 = 0, type3 = 0;
- resource_size_t additional_mem_size = 0, additional_io_size = 0;
+ resource_size_t additional_io_size = 0, additional_mmio_size = 0,
+ additional_mmio_pref_size = 0;
struct resource *b_res;
int ret;
@@ -1212,7 +1229,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pci_bridge_check_ranges(bus);
if (bus->self->is_hotplug_bridge) {
additional_io_size = pci_hotplug_io_size;
- additional_mem_size = pci_hotplug_mem_size;
+ additional_mmio_size = pci_hotplug_mmio_size;
+ additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
}
/* Fall through */
default:
@@ -1230,9 +1248,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (b_res[2].flags & IORESOURCE_MEM_64) {
prefmask |= IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
/*
* If successful, all non-prefetchable resources
@@ -1254,9 +1272,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (!type2) {
prefmask &= ~IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
/*
* If successful, only non-prefetchable resources
@@ -1265,7 +1283,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (ret == 0)
mask = prefmask;
else
- additional_mem_size += additional_mem_size;
+ additional_mmio_size += additional_mmio_pref_size;
type2 = type3 = IORESOURCE_MEM;
}
@@ -1285,8 +1303,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
* prefetchable resource in a 64-bit prefetchable window.
*/
pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ realloc_head ? 0 : additional_mmio_size,
+ additional_mmio_size, realloc_head);
break;
}
}
@@ -2066,6 +2084,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
unsigned int i;
int ret;
+ down_read(&pci_bus_sem);
+
/* Walk to the root hub, releasing bridge BARs when possible */
next = bridge;
do {
@@ -2100,8 +2120,10 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
next = bridge->bus ? bridge->bus->self : NULL;
} while (next);
- if (list_empty(&saved))
+ if (list_empty(&saved)) {
+ up_read(&pci_bus_sem);
return -ENOENT;
+ }
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
@@ -2122,6 +2144,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
}
free_list(&saved);
+ up_read(&pci_bus_sem);
return 0;
cleanup:
@@ -2150,6 +2173,7 @@ cleanup:
pci_setup_bridge(bridge->subordinate);
}
free_list(&saved);
+ up_read(&pci_bus_sem);
return ret;
}
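
A standalone sketch of the two-pass lookup introduced by find_bus_resource_of_type(): prefer the first unassigned window of the right type, fall back to the first assigned one so callers can tell "already assigned" apart from "no such window". The mock resource type below is illustrative, not the kernel struct.

#include <stdio.h>

struct res {
	unsigned long flags;
	void *parent;		/* non-NULL means already assigned */
};

static struct res *find_res(struct res *r, int n, unsigned long mask,
			    unsigned long type)
{
	struct res *assigned = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if ((r[i].flags & mask) != type)
			continue;
		if (!r[i].parent)
			return &r[i];	/* first unassigned match wins */
		if (!assigned)
			assigned = &r[i];
	}
	return assigned;	/* may be NULL: no window of this type */
}

int main(void)
{
	int dummy;
	struct res windows[] = {
		{ 0x100, &dummy },	/* IO, assigned */
		{ 0x200, NULL },	/* MEM, unassigned */
	};

	printf("io  -> %p\n", (void *)find_res(windows, 2, 0x300, 0x100));
	printf("mem -> %p\n", (void *)find_res(windows, 2, 0x300, 0x200));
	return 0;
}
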
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 66610f04d76d..88091bbfe77f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -675,7 +675,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
return -ENOMEM;
s->global = ioread32(&stdev->mmio_sw_event->global_summary);
- s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
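
The one-liner above matters because part_event_bitmap is a 64-bit register; a 32-bit read silently drops partitions 32-63. A sketch of the truncation in plain user-space C (no MMIO involved):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t part_event_bitmap = (1ULL << 40) | 1; /* events in parts 0, 40 */
	uint32_t truncated = (uint32_t)part_event_bitmap;

	printf("64-bit read: %#" PRIx64 "\n", part_event_bitmap);
	printf("32-bit read: %#" PRIx32 " (partition 40 lost)\n", truncated);
	return 0;
}
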
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index ac322d643c7a..08e322789e59 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -50,6 +50,8 @@
#define PHY_R5_PHY_CR_ACK BIT(16)
#define PHY_R5_PHY_BS_OUT BIT(17)
+#define PCIE_RESET_DELAY 500
+
struct phy_g12a_usb3_pcie_priv {
struct regmap *regmap;
struct regmap *regmap_cr;
@@ -196,6 +198,10 @@ static int phy_g12a_usb3_init(struct phy *phy)
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
int data, ret;
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
/* Switch PHY to USB3 */
/* TODO figure out how to handle when PCIe was set in the bootloader */
regmap_update_bits(priv->regmap, PHY_R0,
@@ -272,24 +278,64 @@ static int phy_g12a_usb3_init(struct phy *phy)
return 0;
}
-static int phy_g12a_usb3_pcie_init(struct phy *phy)
+static int phy_g12a_usb3_pcie_power_on(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_power_off(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1d));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_reset(struct phy *phy)
{
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
int ret;
- ret = reset_control_reset(priv->reset);
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ ret = reset_control_assert(priv->reset);
if (ret)
return ret;
+ udelay(PCIE_RESET_DELAY);
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret)
+ return ret;
+
+ udelay(PCIE_RESET_DELAY);
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_init(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
if (priv->mode == PHY_TYPE_USB3)
return phy_g12a_usb3_init(phy);
- /* Power UP PCIE */
- /* TODO figure out when the bootloader has set USB3 mode before */
- regmap_update_bits(priv->regmap, PHY_R0,
- PHY_R0_PCIE_POWER_STATE,
- FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
-
return 0;
}
@@ -297,7 +343,10 @@ static int phy_g12a_usb3_pcie_exit(struct phy *phy)
{
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
- return reset_control_reset(priv->reset);
+ if (priv->mode == PHY_TYPE_USB3)
+ return reset_control_reset(priv->reset);
+
+ return 0;
}
static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
@@ -326,6 +375,9 @@ static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
static const struct phy_ops phy_g12a_usb3_pcie_ops = {
.init = phy_g12a_usb3_pcie_init,
.exit = phy_g12a_usb3_pcie_exit,
+ .power_on = phy_g12a_usb3_pcie_power_on,
+ .power_off = phy_g12a_usb3_pcie_power_off,
+ .reset = phy_g12a_usb3_pcie_reset,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 4053ba6cd0fb..005e02dd4a91 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -103,3 +103,14 @@ config PHY_PXA_USB
The PHY driver will be used by Marvell udc/ehci/otg driver.
To compile this driver as a module, choose M here.
+
+config PHY_MMP3_USB
+ tristate "Marvell MMP3 USB PHY Driver"
+ depends on MACH_MMP3_DT || COMPILE_TEST
+ select GENERIC_PHY
+ help
+ Enable this to support the Marvell MMP3 USB PHY driver for Marvell
+ SoCs. This driver handles PHY initialization and shutdown.
+ The PHY driver will be used by the Marvell udc/ehci/otg drivers.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 434eb9ca6cc3..5a106b1549f4 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
+obj-$(CONFIG_PHY_MMP3_USB) += phy-mmp3-usb.o
obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY) += phy-mvebu-a3700-comphy.o
obj-$(CONFIG_PHY_MVEBU_A3700_UTMI) += phy-mvebu-a3700-utmi.o
obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o
diff --git a/drivers/phy/marvell/phy-mmp3-usb.c b/drivers/phy/marvell/phy-mmp3-usb.c
new file mode 100644
index 000000000000..499869595a58
--- /dev/null
+++ b/drivers/phy/marvell/phy-mmp3-usb.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Copyright (C) 2018,2019 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mmp/cputype.h>
+
+#define USB2_PLL_REG0 0x4
+#define USB2_PLL_REG1 0x8
+#define USB2_TX_REG0 0x10
+#define USB2_TX_REG1 0x14
+#define USB2_TX_REG2 0x18
+#define USB2_RX_REG0 0x20
+#define USB2_RX_REG1 0x24
+#define USB2_RX_REG2 0x28
+#define USB2_ANA_REG0 0x30
+#define USB2_ANA_REG1 0x34
+#define USB2_ANA_REG2 0x38
+#define USB2_DIG_REG0 0x3C
+#define USB2_DIG_REG1 0x40
+#define USB2_DIG_REG2 0x44
+#define USB2_DIG_REG3 0x48
+#define USB2_TEST_REG0 0x4C
+#define USB2_TEST_REG1 0x50
+#define USB2_TEST_REG2 0x54
+#define USB2_CHARGER_REG0 0x58
+#define USB2_OTG_REG0 0x5C
+#define USB2_PHY_MON0 0x60
+#define USB2_RESETVE_REG0 0x64
+#define USB2_ICID_REG0 0x78
+#define USB2_ICID_REG1 0x7C
+
+/* USB2_PLL_REG0 */
+
+/* This is for Ax stepping */
+#define USB2_PLL_FBDIV_SHIFT_MMP3 0
+#define USB2_PLL_FBDIV_MASK_MMP3 (0xFF << 0)
+
+#define USB2_PLL_REFDIV_SHIFT_MMP3 8
+#define USB2_PLL_REFDIV_MASK_MMP3 (0xF << 8)
+
+#define USB2_PLL_VDD12_SHIFT_MMP3 12
+#define USB2_PLL_VDD18_SHIFT_MMP3 14
+
+/* This is for B0 stepping */
+#define USB2_PLL_FBDIV_SHIFT_MMP3_B0 0
+#define USB2_PLL_REFDIV_SHIFT_MMP3_B0 9
+#define USB2_PLL_VDD18_SHIFT_MMP3_B0 14
+#define USB2_PLL_FBDIV_MASK_MMP3_B0 0x01FF
+#define USB2_PLL_REFDIV_MASK_MMP3_B0 0x3E00
+
+#define USB2_PLL_CAL12_SHIFT_MMP3 0
+#define USB2_PLL_CALI12_MASK_MMP3 (0x3 << 0)
+
+#define USB2_PLL_VCOCAL_START_SHIFT_MMP3 2
+
+#define USB2_PLL_KVCO_SHIFT_MMP3 4
+#define USB2_PLL_KVCO_MASK_MMP3 (0x7<<4)
+
+#define USB2_PLL_ICP_SHIFT_MMP3 8
+#define USB2_PLL_ICP_MASK_MMP3 (0x7<<8)
+
+#define USB2_PLL_LOCK_BYPASS_SHIFT_MMP3 12
+
+#define USB2_PLL_PU_PLL_SHIFT_MMP3 13
+#define USB2_PLL_PU_PLL_MASK (0x1 << 13)
+
+#define USB2_PLL_READY_MASK_MMP3 (0x1 << 15)
+
+/* USB2_TX_REG0 */
+#define USB2_TX_IMPCAL_VTH_SHIFT_MMP3 8
+#define USB2_TX_IMPCAL_VTH_MASK_MMP3 (0x7 << 8)
+
+#define USB2_TX_RCAL_START_SHIFT_MMP3 13
+
+/* USB2_TX_REG1 */
+#define USB2_TX_CK60_PHSEL_SHIFT_MMP3 0
+#define USB2_TX_CK60_PHSEL_MASK_MMP3 (0xf << 0)
+
+#define USB2_TX_AMP_SHIFT_MMP3 4
+#define USB2_TX_AMP_MASK_MMP3 (0x7 << 4)
+
+#define USB2_TX_VDD12_SHIFT_MMP3 8
+#define USB2_TX_VDD12_MASK_MMP3 (0x3 << 8)
+
+/* USB2_TX_REG2 */
+#define USB2_TX_DRV_SLEWRATE_SHIFT 10
+
+/* USB2_RX_REG0 */
+#define USB2_RX_SQ_THRESH_SHIFT_MMP3 4
+#define USB2_RX_SQ_THRESH_MASK_MMP3 (0xf << 4)
+
+#define USB2_RX_SQ_LENGTH_SHIFT_MMP3 10
+#define USB2_RX_SQ_LENGTH_MASK_MMP3 (0x3 << 10)
+
+/* USB2_ANA_REG1*/
+#define USB2_ANA_PU_ANA_SHIFT_MMP3 14
+
+/* USB2_OTG_REG0 */
+#define USB2_OTG_PU_OTG_SHIFT_MMP3 3
+
+struct mmp3_usb_phy {
+ struct phy *phy;
+ void __iomem *base;
+};
+
+static unsigned int u2o_get(void __iomem *base, unsigned int offset)
+{
+ return readl_relaxed(base + offset);
+}
+
+static void u2o_set(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg |= value;
+ writel_relaxed(reg, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static void u2o_clear(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~value;
+ writel_relaxed(reg, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static int mmp3_usb_phy_init(struct phy *phy)
+{
+ struct mmp3_usb_phy *mmp3_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = mmp3_usb_phy->base;
+
+ if (cpu_is_mmp3_a0()) {
+ u2o_clear(base, USB2_PLL_REG0, (USB2_PLL_FBDIV_MASK_MMP3
+ | USB2_PLL_REFDIV_MASK_MMP3));
+ u2o_set(base, USB2_PLL_REG0,
+ 0xd << USB2_PLL_REFDIV_SHIFT_MMP3
+ | 0xf0 << USB2_PLL_FBDIV_SHIFT_MMP3);
+ } else if (cpu_is_mmp3_b0()) {
+ u2o_clear(base, USB2_PLL_REG0, USB2_PLL_REFDIV_MASK_MMP3_B0
+ | USB2_PLL_FBDIV_MASK_MMP3_B0);
+ u2o_set(base, USB2_PLL_REG0,
+ 0xd << USB2_PLL_REFDIV_SHIFT_MMP3_B0
+ | 0xf0 << USB2_PLL_FBDIV_SHIFT_MMP3_B0);
+ } else {
+ dev_err(&phy->dev, "unsupported silicon revision\n");
+ return -ENODEV;
+ }
+
+ u2o_clear(base, USB2_PLL_REG1, USB2_PLL_PU_PLL_MASK
+ | USB2_PLL_ICP_MASK_MMP3
+ | USB2_PLL_KVCO_MASK_MMP3
+ | USB2_PLL_CALI12_MASK_MMP3);
+ u2o_set(base, USB2_PLL_REG1, 1 << USB2_PLL_PU_PLL_SHIFT_MMP3
+ | 1 << USB2_PLL_LOCK_BYPASS_SHIFT_MMP3
+ | 3 << USB2_PLL_ICP_SHIFT_MMP3
+ | 3 << USB2_PLL_KVCO_SHIFT_MMP3
+ | 3 << USB2_PLL_CAL12_SHIFT_MMP3);
+
+ u2o_clear(base, USB2_TX_REG0, USB2_TX_IMPCAL_VTH_MASK_MMP3);
+ u2o_set(base, USB2_TX_REG0, 2 << USB2_TX_IMPCAL_VTH_SHIFT_MMP3);
+
+ u2o_clear(base, USB2_TX_REG1, USB2_TX_VDD12_MASK_MMP3
+ | USB2_TX_AMP_MASK_MMP3
+ | USB2_TX_CK60_PHSEL_MASK_MMP3);
+ u2o_set(base, USB2_TX_REG1, 3 << USB2_TX_VDD12_SHIFT_MMP3
+ | 4 << USB2_TX_AMP_SHIFT_MMP3
+ | 4 << USB2_TX_CK60_PHSEL_SHIFT_MMP3);
+
+ u2o_clear(base, USB2_TX_REG2, 3 << USB2_TX_DRV_SLEWRATE_SHIFT);
+ u2o_set(base, USB2_TX_REG2, 2 << USB2_TX_DRV_SLEWRATE_SHIFT);
+
+ u2o_clear(base, USB2_RX_REG0, USB2_RX_SQ_THRESH_MASK_MMP3);
+ u2o_set(base, USB2_RX_REG0, 0xa << USB2_RX_SQ_THRESH_SHIFT_MMP3);
+
+ u2o_set(base, USB2_ANA_REG1, 0x1 << USB2_ANA_PU_ANA_SHIFT_MMP3);
+
+ u2o_set(base, USB2_OTG_REG0, 0x1 << USB2_OTG_PU_OTG_SHIFT_MMP3);
+
+ return 0;
+}
+
+static int mmp3_usb_phy_calibrate(struct phy *phy)
+{
+ struct mmp3_usb_phy *mmp3_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = mmp3_usb_phy->base;
+ int loops;
+
+ /*
+ * PLL VCO and TX Impedance Calibration Timing:
+ *
+ * _____________________________________
+ * PU __________|
+ * _____________________________
+ * VCOCAL START _________|
+ * ___
+ * REG_RCAL_START ________________| |________|_______
+ * | 200us | 400us | 40| 400us | USB PHY READY
+ */
+
+ udelay(200);
+ u2o_set(base, USB2_PLL_REG1, 1 << USB2_PLL_VCOCAL_START_SHIFT_MMP3);
+ udelay(400);
+ u2o_set(base, USB2_TX_REG0, 1 << USB2_TX_RCAL_START_SHIFT_MMP3);
+ udelay(40);
+ u2o_clear(base, USB2_TX_REG0, 1 << USB2_TX_RCAL_START_SHIFT_MMP3);
+ udelay(400);
+
+ loops = 0;
+ while ((u2o_get(base, USB2_PLL_REG1) & USB2_PLL_READY_MASK_MMP3) == 0) {
+ mdelay(1);
+ loops++;
+ if (loops > 100) {
+ dev_err(&phy->dev, "PLL_READY not set after 100ms\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static const struct phy_ops mmp3_usb_phy_ops = {
+ .init = mmp3_usb_phy_init,
+ .calibrate = mmp3_usb_phy_calibrate,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id mmp3_usb_phy_of_match[] = {
+ { .compatible = "marvell,mmp3-usb-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mmp3_usb_phy_of_match);
+
+static int mmp3_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *resource;
+ struct mmp3_usb_phy *mmp3_usb_phy;
+ struct phy_provider *provider;
+
+ mmp3_usb_phy = devm_kzalloc(dev, sizeof(*mmp3_usb_phy), GFP_KERNEL);
+ if (!mmp3_usb_phy)
+ return -ENOMEM;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmp3_usb_phy->base = devm_ioremap_resource(dev, resource);
+ if (IS_ERR(mmp3_usb_phy->base)) {
+ dev_err(dev, "failed to remap PHY regs\n");
+ return PTR_ERR(mmp3_usb_phy->base);
+ }
+
+ mmp3_usb_phy->phy = devm_phy_create(dev, NULL, &mmp3_usb_phy_ops);
+ if (IS_ERR(mmp3_usb_phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(mmp3_usb_phy->phy);
+ }
+
+ phy_set_drvdata(mmp3_usb_phy->phy, mmp3_usb_phy);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver mmp3_usb_phy_driver = {
+ .probe = mmp3_usb_phy_probe,
+ .driver = {
+ .name = "mmp3-usb-phy",
+ .of_match_table = mmp3_usb_phy_of_match,
+ },
+};
+module_platform_driver(mmp3_usb_phy_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Marvell MMP3 USB PHY Driver");
+MODULE_LICENSE("GPL v2");
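
A standalone sketch of the u2o_set()/u2o_clear() read-modify-write pattern used throughout the new PHY driver, here over a plain array standing in for the MMIO block; the kernel helpers additionally read the register back to post the write, which the sketch omits.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x80 / 4];	/* stand-in for the mapped PHY registers */

static uint32_t u2o_get(unsigned int off)
{
	return regs[off / 4];
}

static void u2o_set(unsigned int off, uint32_t bits)
{
	regs[off / 4] |= bits;	/* OR in the requested bits */
}

static void u2o_clear(unsigned int off, uint32_t bits)
{
	regs[off / 4] &= ~bits;	/* clear only the requested bits */
}

int main(void)
{
	unsigned int PLL_REG0 = 0x4;	/* offset from the driver */

	u2o_set(PLL_REG0, 0xd << 8 | 0xf0);	/* refdiv | fbdiv */
	u2o_clear(PLL_REG0, 0xff);		/* drop the fbdiv field */
	printf("reg = %#x\n", (unsigned)u2o_get(PLL_REG0));
	return 0;
}
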
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 62fcae9f05ae..5d6f9f61ce02 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -23,6 +23,8 @@
#include <linux/pm.h>
#include <linux/log2.h>
+#include <linux/soc/qcom/irq.h>
+
#include "../core.h"
#include "../pinconf.h"
#include "pinctrl-msm.h"
@@ -44,6 +46,7 @@
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
+ * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
*/
@@ -61,6 +64,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -707,6 +711,12 @@ static void msm_gpio_irq_mask(struct irq_data *d)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -751,6 +761,12 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -778,10 +794,35 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
static void msm_gpio_irq_enable(struct irq_data *d)
{
+ /*
+ * Clear the interrupt that may be pending before we enable
+ * the line.
+ * This is especially a problem with the GPIOs routed to the
+ * PDC. These GPIOs are direct-connect interrupts to the GIC.
+ * Disabling the interrupt line at the PDC does not prevent
+ * the interrupt from being latched at the GIC. The state at
+ * GIC needs to be cleared before enabling.
+ */
+ if (d->parent_data) {
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_chip_enable_parent(d);
+ }
msm_gpio_irq_clear_unmask(d, true);
}
+static void msm_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data)
+ irq_chip_disable_parent(d);
+
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ msm_gpio_irq_mask(d);
+}
+
static void msm_gpio_irq_unmask(struct irq_data *d)
{
msm_gpio_irq_clear_unmask(d, false);
@@ -795,6 +836,9 @@ static void msm_gpio_irq_ack(struct irq_data *d)
unsigned long flags;
u32 val;
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -820,6 +864,12 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_set_type_parent(d, type);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return 0;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -912,6 +962,15 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
+ /*
+ * While they may not wake up when the TLMM is powered off,
+ * some GPIOs would like to wakeup the system from suspend
+ * when TLMM is powered on. To allow that, enable the GPIO
+ * summary line to be wakeup capable at GIC.
+ */
+ if (d->parent_data)
+ irq_chip_set_wake_parent(d, on);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
@@ -990,6 +1049,30 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static int msm_gpio_wakeirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_gpio_wakeirq_map *map;
+ int i;
+
+ *parent = GPIO_NO_WAKE_IRQ;
+ *parent_type = IRQ_TYPE_EDGE_RISING;
+
+ for (i = 0; i < pctrl->soc->nwakeirq_map; i++) {
+ map = &pctrl->soc->wakeirq_map[i];
+ if (map->gpio == child) {
+ *parent = map->wakeirq;
+ break;
+ }
+ }
+
+ return 0;
+}
+
static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
{
if (pctrl->soc->reserved_gpios)
@@ -1002,8 +1085,10 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- int ret;
- unsigned ngpio = pctrl->soc->ngpios;
+ int i, ret;
+ unsigned gpio, ngpio = pctrl->soc->ngpios;
+ struct device_node *np;
+ bool skip;
if (WARN_ON(ngpio > MAX_NR_GPIO))
return -EINVAL;
@@ -1020,17 +1105,40 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.name = "msmgpio";
pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
+ pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
+ pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
+ if (np) {
+ chip->irq.parent_domain = irq_find_matching_host(np,
+ DOMAIN_BUS_WAKEUP);
+ of_node_put(np);
+ if (!chip->irq.parent_domain)
+ return -EPROBE_DEFER;
+ chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
+
+ /*
+ * Let's skip handling the GPIOs if the parent irqchip
+ * is handling the direct-connect IRQ of the GPIO.
+ */
+ skip = irq_domain_qcom_handle_wakeup(chip->irq.parent_domain);
+ for (i = 0; skip && i < pctrl->soc->nwakeirq_map; i++) {
+ gpio = pctrl->soc->wakeirq_map[i].gpio;
+ set_bit(gpio, pctrl->skip_wake_irqs);
+ }
+ }
+
girq = &chip->irq;
girq->chip = &pctrl->irq_chip;
girq->parent_handler = msm_gpio_irq_handler;
+ girq->fwnode = pctrl->dev->fwnode;
girq->num_parents = 1;
girq->parents = devm_kcalloc(pctrl->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
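
A standalone sketch of the child-to-parent wake IRQ lookup wired up above: a linear scan of the per-SoC {gpio, wakeirq} table, returning a "no wake IRQ" sentinel when the GPIO has no PDC pin. The sentinel value and the table subset here are illustrative.

#include <stdio.h>

#define NO_WAKE_IRQ 0	/* illustrative sentinel */

struct wakeirq_map {
	unsigned int gpio;
	unsigned int wakeirq;
};

static const struct wakeirq_map map[] = {
	{ 1, 30 }, { 3, 31 }, { 5, 32 },	/* subset of the sdm845 table */
};

static unsigned int gpio_to_wakeirq(unsigned int gpio)
{
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].gpio == gpio)
			return map[i].wakeirq;
	return NO_WAKE_IRQ;
}

int main(void)
{
	printf("gpio 3 -> %u\n", gpio_to_wakeirq(3));	/* 31 */
	printf("gpio 4 -> %u\n", gpio_to_wakeirq(4));	/* 0: none */
	return 0;
}
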
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 48569cda8471..9452da18a78b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -92,6 +92,16 @@ struct msm_pingroup {
};
/**
+ * struct msm_gpio_wakeirq_map - Map of GPIOs and their wakeup pins
+ * @gpio: The GPIO that is wakeup capable
+ * @wakeirq: The interrupt at the always-on interrupt controller
+ */
+struct msm_gpio_wakeirq_map {
+ unsigned int gpio;
+ unsigned int wakeirq;
+};
+
+/**
* struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
* @pins: An array describing all pins the pin controller affects.
* @npins: The number of entries in @pins.
@@ -101,6 +111,8 @@ struct msm_pingroup {
* @ngroups: The number of entries in @groups.
* @ngpio: The number of pingroups the driver should expose as GPIOs.
* @pull_no_keeper: The SoC does not support keeper bias.
+ * @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
+ * @nwakeirq_map: The number of entries in @wakeirq_map
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -114,6 +126,8 @@ struct msm_pinctrl_soc_data {
const char *const *tiles;
unsigned int ntiles;
const int *reserved_gpios;
+ const struct msm_gpio_wakeirq_map *wakeirq_map;
+ unsigned int nwakeirq_map;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index ce495970459d..2834d2c1338c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
@@ -1282,6 +1282,24 @@ static const int sdm845_acpi_reserved_gpios[] = {
0, 1, 2, 3, 81, 82, 83, 84, -1
};
+static const struct msm_gpio_wakeirq_map sdm845_pdc_map[] = {
+ { 1, 30 }, { 3, 31 }, { 5, 32 }, { 10, 33 }, { 11, 34 },
+ { 20, 35 }, { 22, 36 }, { 24, 37 }, { 26, 38 }, { 30, 39 },
+ { 31, 117 }, { 32, 41 }, { 34, 42 }, { 36, 43 }, { 37, 44 },
+ { 38, 45 }, { 39, 46 }, { 40, 47 }, { 41, 115 }, { 43, 49 },
+ { 44, 50 }, { 46, 51 }, { 48, 52 }, { 49, 118 }, { 52, 54 },
+ { 53, 55 }, { 54, 56 }, { 56, 57 }, { 57, 58 }, { 58, 59 },
+ { 59, 60 }, { 60, 61 }, { 61, 62 }, { 62, 63 }, { 63, 64 },
+ { 64, 65 }, { 66, 66 }, { 68, 67 }, { 71, 68 }, { 73, 69 },
+ { 77, 70 }, { 78, 71 }, { 79, 72 }, { 80, 73 }, { 84, 74 },
+ { 85, 75 }, { 86, 76 }, { 88, 77 }, { 89, 116 }, { 91, 79 },
+ { 92, 80 }, { 95, 81 }, { 96, 82 }, { 97, 83 }, { 101, 84 },
+ { 103, 85 }, { 104, 86 }, { 115, 90 }, { 116, 91 }, { 117, 92 },
+ { 118, 93 }, { 119, 94 }, { 120, 95 }, { 121, 96 }, { 122, 97 },
+ { 123, 98 }, { 124, 99 }, { 125, 100 }, { 127, 102 }, { 128, 103 },
+ { 129, 104 }, { 130, 105 }, { 132, 106 }, { 133, 107 }, { 145, 108 },
+};
+
static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.pins = sdm845_pins,
.npins = ARRAY_SIZE(sdm845_pins),
@@ -1290,6 +1308,9 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.groups = sdm845_groups,
.ngroups = ARRAY_SIZE(sdm845_groups),
.ngpios = 151,
+ .wakeirq_map = sdm845_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
+
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index ee5f08ea57b6..5f57282a28da 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -132,9 +132,9 @@ config CROS_EC_LPC
module will be called cros_ec_lpcs.
config CROS_EC_PROTO
- bool
- help
- ChromeOS EC communication protocol helpers.
+ bool
+ help
+ ChromeOS EC communication protocol helpers.
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
@@ -190,6 +190,19 @@ config CROS_EC_DEBUGFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_debugfs.
+config CROS_EC_SENSORHUB
+ tristate "ChromeOS EC MEMS Sensor Hub"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ Allow loading IIO sensors. This driver is loaded by MFD and will in
+ turn query the EC and register the sensors.
+ It also forwards the sensor data coming from the EC to the IIO sensor
+ objects.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_sensorhub.
+
config CROS_EC_SYSFS
tristate "ChromeOS EC control and information through sysfs"
depends on MFD_CROS_EC_DEV && SYSFS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 477ec3d1d1c9..aacd5920d8a1 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fd77e6fa74c2..6d6ce86a1408 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -31,13 +31,32 @@ static struct cros_ec_platform pd_p = {
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
};
-static irqreturn_t ec_irq_thread(int irq, void *data)
+static irqreturn_t ec_irq_handler(int irq, void *data)
{
struct cros_ec_device *ec_dev = data;
- bool wake_event = true;
+
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * cros_ec_handle_event() - process and forward pending events on EC
+ * @ec_dev: Device with events to process.
+ *
+ * Call this function in a loop when the kernel is notified that the EC has
+ * pending events.
+ *
+ * Return: true if more events are still pending and this function should be
+ * called again.
+ */
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
+{
+ bool wake_event;
+ bool ec_has_more_events;
int ret;
- ret = cros_ec_get_next_event(ec_dev, &wake_event);
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &ec_has_more_events);
/*
* Signal only if wake host events or any interrupt if
@@ -50,6 +69,20 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
if (ret > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
0, ec_dev);
+
+ return ec_has_more_events;
+}
+EXPORT_SYMBOL(cros_ec_handle_event);
+
+static irqreturn_t ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
+
return IRQ_HANDLED;
}
@@ -104,6 +137,15 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
return ret;
}
+/**
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
+ *
+ * Before calling this, allocate a pointer to a new device and then fill
+ * in all the fields up to the --private-- marker.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_register(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -131,10 +173,12 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
return err;
}
- if (ec_dev->irq) {
- err = devm_request_threaded_irq(dev, ec_dev->irq, NULL,
- ec_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "chromeos-ec", ec_dev);
+ if (ec_dev->irq > 0) {
+ err = devm_request_threaded_irq(dev, ec_dev->irq,
+ ec_irq_handler,
+ ec_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "chromeos-ec", ec_dev);
if (err) {
dev_err(dev, "Failed to request IRQ %d: %d",
ec_dev->irq, err);
@@ -198,6 +242,14 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
}
EXPORT_SYMBOL(cros_ec_register);
+/**
+ * cros_ec_unregister() - Remove a ChromeOS EC.
+ * @ec_dev: Device to unregister.
+ *
+ * Call this to deregister a ChromeOS EC, then clean up any private data.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_unregister(struct cros_ec_device *ec_dev)
{
if (ec_dev->pd)
@@ -209,6 +261,14 @@ int cros_ec_unregister(struct cros_ec_device *ec_dev)
EXPORT_SYMBOL(cros_ec_unregister);
#ifdef CONFIG_PM_SLEEP
+/**
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
+ *
+ * This can be called by drivers to handle a suspend event.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_suspend(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -238,11 +298,19 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
while (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL) > 0)
+ cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
}
+/**
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
+ *
+ * This can be called by drivers to handle a resume event.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
int ret;
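
A standalone sketch of the split interrupt handling introduced above: the hard handler only records a timestamp as close to the interrupt as possible, and the threaded handler drains events until the EC stops reporting more. The event source is simulated; this is not the kernel IRQ API.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long last_event_time;
static int pending = 3;		/* pretend the EC has 3 queued events */

static void hard_irq(void)
{
	/* taken early so sensor timestamps stay accurate */
	last_event_time = (long)time(NULL);
}

static bool handle_one_event(void)
{
	printf("event consumed at %ld\n", last_event_time);
	return --pending > 0;	/* the EC's "more events" flag */
}

static void threaded_irq(void)
{
	bool more;

	do {
		more = handle_one_event();
	} while (more);
}

int main(void)
{
	hard_irq();
	threaded_irq();
	return 0;
}
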
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 25ca2c894b4d..e5996821d08b 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -136,11 +136,11 @@ static void ish_evt_handler(struct work_struct *work)
struct ishtp_cl_data *client_data =
container_of(work, struct ishtp_cl_data, work_ec_evt);
struct cros_ec_device *ec_dev = client_data->ec_dev;
+ bool ec_has_more_events;
- if (cros_ec_get_next_event(ec_dev, NULL) > 0) {
- blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
- }
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
}
/**
@@ -200,13 +200,14 @@ static int ish_send(struct ishtp_cl_data *client_data,
* process_recv() - Receive and parse an incoming packet
* @cros_ish_cl: Client instance to get stats
* @rb_in_proc: Host interface message buffer
+ * @timestamp: Timestamp of when parent callback started
*
* Parse the incoming packet. If it is a response packet then it will
* update per-instance flags and wake up the caller waiting for the
* response. If it is an event packet then it will schedule event work.
*/
static void process_recv(struct ishtp_cl *cros_ish_cl,
- struct ishtp_cl_rb *rb_in_proc)
+ struct ishtp_cl_rb *rb_in_proc, ktime_t timestamp)
{
size_t data_len = rb_in_proc->buf_idx;
struct ishtp_cl_data *client_data =
@@ -295,6 +296,11 @@ error_wake_up:
break;
case CROS_MKBP_EVENT:
+ /*
+ * Set timestamp from beginning of function since we actually
+ * got an incoming MKBP event
+ */
+ client_data->ec_dev->last_event_time = timestamp;
/* The event system doesn't send any data in buffer */
schedule_work(&client_data->work_ec_evt);
@@ -322,10 +328,17 @@ static void ish_event_cb(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl_rb *rb_in_proc;
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ ktime_t timestamp;
+
+ /*
+ * Take timestamp as close to hardware interrupt as possible for sensor
+ * timestamps.
+ */
+ timestamp = cros_ec_get_time_ns();
while ((rb_in_proc = ishtp_cl_rx_get_rb(cros_ish_cl)) != NULL) {
/* Decide what to do with received data */
- process_recv(cros_ish_cl, rb_in_proc);
+ process_recv(cros_ish_cl, rb_in_proc, timestamp);
}
}
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 7d10d909435f..dccf479c6625 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -312,11 +312,20 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
{
struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+ int ret;
- if (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL) > 0)
- blocking_notifier_call_chain(&ec_dev->event_notifier, 0,
- ec_dev);
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ if (ec_dev->mkbp_event_supported)
+ do {
+ ret = cros_ec_get_next_event(ec_dev, NULL,
+ &ec_has_more_events);
+ if (ret > 0)
+ blocking_notifier_call_chain(
+ &ec_dev->event_notifier, 0,
+ ec_dev);
+ } while (ec_has_more_events);
if (value == ACPI_NOTIFY_DEVICE_WAKE)
pm_system_wakeup();
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index f659f96bda12..da1b1c450433 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -117,6 +117,17 @@ static int send_command(struct cros_ec_device *ec_dev,
return ret;
}
+/**
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to register.
+ * @msg: Message to write.
+ *
+ * This is intended to be used by all ChromeOS EC drivers, but at present
+ * only SPI uses it. Once LPC uses the same protocol it can start using it.
+ * I2C could use it now, with a refactor of the existing code.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -141,6 +152,16 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
+/**
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
+ *
+ * This is used by ChromeOS EC drivers to check the ec_msg->result for
+ * errors and to warn about them.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -326,6 +347,13 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
return ret;
}
+/**
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to register.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -428,7 +456,10 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
if (ret < 0 || ver_mask == 0)
ec_dev->mkbp_event_supported = 0;
else
- ec_dev->mkbp_event_supported = 1;
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n",
+ ec_dev->mkbp_event_supported - 1);
/* Probe if host sleep v1 is supported for S0ix failure detection. */
ret = cros_ec_get_host_command_version_mask(ec_dev,
@@ -453,6 +484,16 @@ exit:
}
EXPORT_SYMBOL(cros_ec_query_all);
+/**
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used
+ * instead of calling the EC's cmd_xfer() callback directly.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -500,6 +541,18 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * This function is identical to cros_ec_cmd_xfer, except it returns success
+ * status only if both the command was transmitted successfully and the EC
+ * replied with success status. It's not necessary to check msg->result when
+ * using this function.
+ *
+ * Return: The number of bytes transferred on success or negative error code.
+ */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -519,6 +572,7 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
static int get_next_event_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg,
+ struct ec_response_get_next_event_v1 *event,
int version, uint32_t size)
{
int ret;
@@ -531,7 +585,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data, ret);
+ ec_dev->event_data = *event;
}
return ret;
@@ -539,30 +593,26 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
static int get_next_event(struct cros_ec_device *ec_dev)
{
- u8 buffer[sizeof(struct cros_ec_command) + sizeof(ec_dev->event_data)];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
- static int cmd_version = 1;
- int ret;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_next_event_v1 event;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_next_event_v1 *event = &buf.event;
+ const int cmd_version = ec_dev->mkbp_event_supported - 1;
+ memset(msg, 0, sizeof(*msg));
if (ec_dev->suspended) {
dev_dbg(ec_dev->dev, "Device suspended.\n");
return -EHOSTDOWN;
}
- if (cmd_version == 1) {
- ret = get_next_event_xfer(ec_dev, msg, cmd_version,
- sizeof(struct ec_response_get_next_event_v1));
- if (ret < 0 || msg->result != EC_RES_INVALID_VERSION)
- return ret;
-
- /* Fallback to version 0 for future send attempts */
- cmd_version = 0;
- }
-
- ret = get_next_event_xfer(ec_dev, msg, cmd_version,
+ if (cmd_version == 0)
+ return get_next_event_xfer(ec_dev, msg, event, 0,
sizeof(struct ec_response_get_next_event));
- return ret;
+ return get_next_event_xfer(ec_dev, msg, event, cmd_version,
+ sizeof(struct ec_response_get_next_event_v1));
}
static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
@@ -584,27 +634,60 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
return ec_dev->event_size;
}
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
+/**
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ * @wake_event: Pointer to a bool set to true upon return if the event might be
+ * treated as a wake event. Ignored if null.
+ * @has_more_events: Pointer to bool set to true if more than one event is
+ * pending.
+ * Some ECs set this flag to indicate that cros_ec_get_next_event()
+ * can be called again immediately. This is an optimization that
+ * avoids issuing an EC command for nothing, or waiting for another
+ * interrupt from the EC before processing the next message.
+ * Ignored if null.
+ *
+ * Return: negative error code on errors; 0 for no data; or else number of
+ * bytes received (i.e., an event was retrieved successfully). Event types are
+ * written out to @ec_dev->event_data.event_type on success.
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events)
{
u8 event_type;
u32 host_event;
int ret;
- if (!ec_dev->mkbp_event_supported) {
- ret = get_keyboard_state_event(ec_dev);
- if (ret <= 0)
- return ret;
+ /*
+ * Default value for wake_event.
+ * Wake up on a keyboard event, and also on a spurious interrupt or a
+ * link error to the EC.
+ */
+ if (wake_event)
+ *wake_event = true;
- if (wake_event)
- *wake_event = true;
+ /*
+ * Default value for has_more_events.
+ * The EC will raise another interrupt if the AP does not process all
+ * events anyway.
+ */
+ if (has_more_events)
+ *has_more_events = false;
- return ret;
- }
+ if (!ec_dev->mkbp_event_supported)
+ return get_keyboard_state_event(ec_dev);
ret = get_next_event(ec_dev);
if (ret <= 0)
return ret;
+ if (has_more_events)
+ *has_more_events = ec_dev->event_data.event_type &
+ EC_MKBP_HAS_MORE_EVENTS;
+ ec_dev->event_data.event_type &= EC_MKBP_EVENT_TYPE_MASK;
+
if (wake_event) {
event_type = ec_dev->event_data.event_type;
host_event = cros_ec_get_host_event(ec_dev);
@@ -619,15 +702,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
else if (host_event &&
!(host_event & ec_dev->host_event_wake_mask))
*wake_event = false;
- /* Consider all other events as wake events. */
- else
- *wake_event = true;
}
return ret;
}
EXPORT_SYMBOL(cros_ec_get_next_event);
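For illustration, a threaded interrupt handler could drain events with this API as sketched below. This is a hedged sketch; my_ec_irq_thread() is a hypothetical name, and the pattern mirrors the LPC notify handler and the old rpmsg wake handling shown elsewhere in this patch:

static irqreturn_t my_ec_irq_thread(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;
	bool wake_event, has_more_events;
	int ret;

	do {
		ret = cros_ec_get_next_event(ec_dev, &wake_event,
					     &has_more_events);
		/* Signal a wakeup on wake events (default true on error). */
		if (wake_event && device_may_wakeup(ec_dev->dev))
			pm_wakeup_event(ec_dev->dev, 0);
		if (ret > 0)
			blocking_notifier_call_chain(&ec_dev->event_notifier,
						     0, ec_dev);
	} while (has_more_events);

	return IRQ_HANDLED;
}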
+/**
+ * cros_ec_get_host_event() - Return a mask of events set by the ChromeOS EC.
+ * @ec_dev: Device to fetch events from.
+ *
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * pending events and call the functions registered on the EC notifier.
+ * This helper reports which host events were raised.
+ *
+ * Return: 0 on error, or a non-zero bitmask of one or more EC_HOST_EVENT_*.
+ */
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
{
u32 host_event;
@@ -647,3 +737,120 @@ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
return host_event;
}
EXPORT_SYMBOL(cros_ec_get_host_event);
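A hedged example of testing for a specific host event with this helper; the event chosen here is arbitrary and only for illustration:

	u32 host_event = cros_ec_get_host_event(ec_dev);

	if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_OPEN))
		dev_dbg(ec_dev->dev, "lid opened\n");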
+
+/**
+ * cros_ec_check_features() - Test for the presence of EC features
+ *
+ * @ec: EC device; it does not have to be connected directly to the AP and
+ * can be daisy-chained through another device.
+ * @feature: One of the ec_feature_code bits.
+ *
+ * Call this function to test whether the ChromeOS EC supports a feature.
+ *
+ * Return: 1 if supported, 0 if not.
+ */
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ if (ec->features[0] == -1U && ec->features[1] == -1U) {
+ /* features bitmap not read yet */
+ msg = kzalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+ msg->insize = sizeof(ec->features);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+ ret, msg->result);
+ memset(ec->features, 0, sizeof(ec->features));
+ } else {
+ memcpy(ec->features, msg->data, sizeof(ec->features));
+ }
+
+ dev_dbg(ec->dev, "EC features %08x %08x\n",
+ ec->features[0], ec->features[1]);
+
+ kfree(msg);
+ }
+
+ return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
+EXPORT_SYMBOL_GPL(cros_ec_check_features);
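For example, a sub-driver probe might gate itself on a feature bit roughly like this (a sketch; the feature chosen is arbitrary, matching what the sensor hub probe below does):

	if (!cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
		return -ENODEV;	/* EC is not a sensor hub. */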
+
+/**
+ * cros_ec_get_sensor_count() - Return the number of MEMS sensors supported.
+ *
+ * @ec: EC device; it does not have to be connected directly to the AP and
+ * can be daisy-chained through another device.
+ *
+ * Return: number of sensors reported by the EC, or < 0 in case of error.
+ */
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
+{
+ /*
+ * Issue a command to get the number of sensors reported.
+ * If not supported, check for legacy mode.
+ */
+ int ret, sensor_count;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ struct cros_ec_device *ec_dev = ec->ec_dev;
+ u8 status;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+
+ params = (struct ec_params_motion_sense *)msg->data;
+ params->cmd = MOTIONSENSE_CMD_DUMP;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0) {
+ sensor_count = ret;
+ } else if (msg->result != EC_RES_SUCCESS) {
+ sensor_count = -EPROTO;
+ } else {
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_count = resp->dump.sensor_count;
+ }
+ kfree(msg);
+
+ /*
+ * Check for legacy mode: find out whether sensors are accessible
+ * via the LPC interface.
+ */
+ if (sensor_count == -EPROTO &&
+ ec->cmd_offset == 0 &&
+ ec_dev->cmd_readmem) {
+ ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS,
+ 1, &status);
+ if (ret >= 0 &&
+ (status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
+ /*
+ * We have 2 sensors, one in the lid, one in the base.
+ */
+ sensor_count = 2;
+ } else {
+ /*
+ * The EC uses the LPC interface but no sensors are present.
+ */
+ sensor_count = 0;
+ }
+ } else if (sensor_count == -EPROTO) {
+ /* EC responded, but does not understand DUMP command. */
+ sensor_count = 0;
+ }
+ return sensor_count;
+}
+EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
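A minimal caller sketch, with error handling elided; cros_ec_sensorhub_register() below shows the fuller intended use:

	int sensor_num = cros_ec_get_sensor_count(ec);

	if (sensor_num < 0)
		return sensor_num;	/* EC query failed. */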
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 0c3738c3244d..bd068afe43b5 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -143,22 +143,11 @@ cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
struct cros_ec_rpmsg,
host_event_work);
struct cros_ec_device *ec_dev = dev_get_drvdata(&ec_rpmsg->rpdev->dev);
- bool wake_event = true;
- int ret;
-
- ret = cros_ec_get_next_event(ec_dev, &wake_event);
-
- /*
- * Signal only if wake host events or any interrupt if
- * cros_ec_get_next_event() returned an error (default value for
- * wake_event is true)
- */
- if (wake_event && device_may_wakeup(ec_dev->dev))
- pm_wakeup_event(ec_dev->dev, 0);
+ bool ec_has_more_events;
- if (ret > 0)
- blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
}
static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
new file mode 100644
index 000000000000..04d8879689e9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensor HUB driver that discovers sensors behind a ChromeOS Embedded
+ * Controller.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define DRV_NAME "cros-ec-sensorhub"
+
+static void cros_ec_sensorhub_free_sensor(void *arg)
+{
+ struct platform_device *pdev = arg;
+
+ platform_device_unregister(pdev);
+}
+
+static int cros_ec_sensorhub_allocate_sensor(struct device *parent,
+ char *sensor_name,
+ int sensor_num)
+{
+ struct cros_ec_sensor_platform sensor_platforms = {
+ .sensor_num = sensor_num,
+ };
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(parent, sensor_name,
+ PLATFORM_DEVID_AUTO,
+ &sensor_platforms,
+ sizeof(sensor_platforms));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return devm_add_action_or_reset(parent,
+ cros_ec_sensorhub_free_sensor,
+ pdev);
+}
+
+static int cros_ec_sensorhub_register(struct device *dev,
+ struct cros_ec_sensorhub *sensorhub)
+{
+ int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ struct cros_ec_dev *ec = sensorhub->ec;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ int ret, i, sensor_num;
+ char *name;
+
+ sensor_num = cros_ec_get_sensor_count(ec);
+ if (sensor_num < 0) {
+ dev_err(dev,
+ "Unable to retrieve sensor information (err:%d)\n",
+ sensor_num);
+ return sensor_num;
+ }
+
+ if (sensor_num == 0) {
+ dev_err(dev, "Zero sensors reported.\n");
+ return -EINVAL;
+ }
+
+ /* Prepare a message to send INFO command to each sensor. */
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+ params = (struct ec_params_motion_sense *)msg->data;
+ resp = (struct ec_response_motion_sense *)msg->data;
+
+ for (i = 0; i < sensor_num; i++) {
+ params->cmd = MOTIONSENSE_CMD_INFO;
+ params->info.sensor_num = i;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(dev, "no info for EC sensor %d : %d/%d\n",
+ i, ret, msg->result);
+ continue;
+ }
+
+ switch (resp->info.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ name = "cros-ec-accel";
+ break;
+ case MOTIONSENSE_TYPE_BARO:
+ name = "cros-ec-baro";
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ name = "cros-ec-gyro";
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ name = "cros-ec-mag";
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ name = "cros-ec-prox";
+ break;
+ case MOTIONSENSE_TYPE_LIGHT:
+ name = "cros-ec-light";
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ name = "cros-ec-activity";
+ break;
+ default:
+ dev_warn(dev, "unknown type %d\n", resp->info.type);
+ continue;
+ }
+
+ ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
+ if (ret)
+ goto error;
+
+ sensor_type[resp->info.type]++;
+ }
+
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
+ ec->has_kb_wake_angle = true;
+
+ if (cros_ec_check_features(ec,
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-lid-angle",
+ 0);
+ if (ret)
+ goto error;
+ }
+
+ kfree(msg);
+ return 0;
+
+error:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_sensorhub_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_sensorhub *data;
+ int ret;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ec = dev_get_drvdata(dev->parent);
+ dev_set_drvdata(dev, data);
+
+ /* Check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) {
+ ret = cros_ec_sensorhub_register(dev, data);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If the device has sensors but does not claim to
+ * be a sensor hub, we are in legacy mode.
+ */
+ for (i = 0; i < 2; i++) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-accel-legacy", i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_sensorhub_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_sensorhub_probe,
+};
+
+module_platform_driver(cros_ec_sensorhub_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 2430e8b82810..374cdd1e868a 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -224,6 +224,7 @@ static int cros_usbpd_logger_remove(struct platform_device *pd)
struct logger_data *logger = platform_get_drvdata(pd);
cancel_delayed_work_sync(&logger->log_work);
+ destroy_workqueue(logger->log_workqueue);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 89007b0bc743..365f30e116ee 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config WILCO_EC
tristate "ChromeOS Wilco Embedded Controller"
- depends on ACPI && X86 && CROS_EC_LPC
+ depends on ACPI && X86 && CROS_EC_LPC && LEDS_CLASS
help
If you say Y here, you get support for talking to the ChromeOS
Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/Makefile b/drivers/platform/chrome/wilco_ec/Makefile
index bc817164596e..ecb3145cab18 100644
--- a/drivers/platform/chrome/wilco_ec/Makefile
+++ b/drivers/platform/chrome/wilco_ec/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-wilco_ec-objs := core.o mailbox.o properties.o sysfs.o
+wilco_ec-objs := core.o keyboard_leds.o mailbox.o \
+ properties.o sysfs.o
obj-$(CONFIG_WILCO_EC) += wilco_ec.o
wilco_ec_debugfs-objs := debugfs.o
obj-$(CONFIG_WILCO_EC_DEBUGFS) += wilco_ec_debugfs.o
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 3724bf4b77c6..5210c357feef 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -5,10 +5,6 @@
* Copyright 2018 Google LLC
*
* This is the entry point for the drivers that control the Wilco EC.
- * This driver is responsible for several tasks:
- * - Initialize the register interface that is used by wilco_ec_mailbox()
- * - Create a platform device which is picked up by the debugfs driver
- * - Create a platform device which is picked up by the RTC driver
*/
#include <linux/acpi.h>
@@ -87,12 +83,31 @@ static int wilco_ec_probe(struct platform_device *pdev)
goto unregister_debugfs;
}
+ /* Set up the keyboard backlight LEDs. */
+ ret = wilco_keyboard_leds_init(ec);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to initialize keyboard LEDs: %d\n",
+ ret);
+ goto unregister_rtc;
+ }
+
ret = wilco_ec_add_sysfs(ec);
if (ret < 0) {
dev_err(dev, "Failed to create sysfs entries: %d", ret);
goto unregister_rtc;
}
+ /* Register child device to be found by charger config driver. */
+ ec->charger_pdev = platform_device_register_data(dev, "wilco-charger",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(ec->charger_pdev)) {
+ dev_err(dev, "Failed to create charger platform device\n");
+ ret = PTR_ERR(ec->charger_pdev);
+ goto remove_sysfs;
+ }
+
/* Register child device that will be found by the telemetry driver. */
ec->telem_pdev = platform_device_register_data(dev, "wilco_telem",
PLATFORM_DEVID_AUTO,
@@ -100,11 +115,13 @@ static int wilco_ec_probe(struct platform_device *pdev)
if (IS_ERR(ec->telem_pdev)) {
dev_err(dev, "Failed to create telemetry platform device\n");
ret = PTR_ERR(ec->telem_pdev);
- goto remove_sysfs;
+ goto unregister_charge_config;
}
return 0;
+unregister_charge_config:
+ platform_device_unregister(ec->charger_pdev);
remove_sysfs:
wilco_ec_remove_sysfs(ec);
unregister_rtc:
@@ -120,6 +137,7 @@ static int wilco_ec_remove(struct platform_device *pdev)
{
struct wilco_ec_device *ec = platform_get_drvdata(pdev);
+ platform_device_unregister(ec->charger_pdev);
wilco_ec_remove_sysfs(ec);
platform_device_unregister(ec->telem_pdev);
platform_device_unregister(ec->rtc_pdev);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 8d65a1e2f1a3..df5a5f6c3ec6 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -160,29 +160,29 @@ static const struct file_operations fops_raw = {
#define CMD_KB_CHROME 0x88
#define SUB_CMD_H1_GPIO 0x0A
+#define SUB_CMD_TEST_EVENT 0x0B
-struct h1_gpio_status_request {
+struct ec_request {
u8 cmd; /* Always CMD_KB_CHROME */
u8 reserved;
- u8 sub_cmd; /* Always SUB_CMD_H1_GPIO */
+ u8 sub_cmd;
} __packed;
-struct hi_gpio_status_response {
+struct ec_response {
u8 status; /* 0 if allowed */
- u8 val; /* BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL */
+ u8 val;
} __packed;
-static int h1_gpio_get(void *arg, u64 *val)
+static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
{
- struct wilco_ec_device *ec = arg;
- struct h1_gpio_status_request rq;
- struct hi_gpio_status_response rs;
+ struct ec_request rq;
+ struct ec_response rs;
struct wilco_ec_message msg;
int ret;
memset(&rq, 0, sizeof(rq));
rq.cmd = CMD_KB_CHROME;
- rq.sub_cmd = SUB_CMD_H1_GPIO;
+ rq.sub_cmd = sub_cmd;
memset(&msg, 0, sizeof(msg));
msg.type = WILCO_EC_MSG_LEGACY;
@@ -196,14 +196,39 @@ static int h1_gpio_get(void *arg, u64 *val)
if (rs.status)
return -EIO;
- *val = rs.val;
+ *out_val = rs.val;
return 0;
}
+/**
+ * h1_gpio_get() - Get the H1 GPIO status.
+ * @arg: The wilco EC device.
+ * @val: BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL
+ */
+static int h1_gpio_get(void *arg, u64 *val)
+{
+ return send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+}
+
DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
/**
+ * test_event_set() - Send a command to the EC to cause an EC test event.
+ * @arg: The wilco EC device.
+ * @val: unused.
+ */
+static int test_event_set(void *arg, u64 val)
+{
+ u8 ret;
+
+ return send_ec_cmd(arg, SUB_CMD_TEST_EVENT, &ret);
+}
+
+/* Format is unused since it is only required for the get method, which is NULL */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_test_event, NULL, test_event_set, "%llu\n");
+
+/**
* wilco_ec_debugfs_probe() - Create the debugfs node
* @pdev: The platform device, probably created in core.c
*
@@ -226,6 +251,8 @@ static int wilco_ec_debugfs_probe(struct platform_device *pdev)
debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
&fops_h1_gpio);
+ debugfs_create_file("test_event", 0200, debug_info->dir, ec,
+ &fops_test_event);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
new file mode 100644
index 000000000000..bb0edf51dfda
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Keyboard backlight LED driver for the Wilco Embedded Controller
+ *
+ * Copyright 2019 Google LLC
+ *
+ * Since the EC will never change the backlight level of its own accord,
+ * we don't need to implement a brightness_get() method.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/slab.h>
+
+#define WILCO_EC_COMMAND_KBBL 0x75
+#define WILCO_KBBL_MODE_FLAG_PWM BIT(1) /* Set brightness by percent. */
+#define WILCO_KBBL_DEFAULT_BRIGHTNESS 0
+
+struct wilco_keyboard_leds {
+ struct wilco_ec_device *ec;
+ struct led_classdev keyboard;
+};
+
+enum wilco_kbbl_subcommand {
+ WILCO_KBBL_SUBCMD_GET_FEATURES = 0x00,
+ WILCO_KBBL_SUBCMD_GET_STATE = 0x01,
+ WILCO_KBBL_SUBCMD_SET_STATE = 0x02,
+};
+
+/**
+ * struct wilco_keyboard_leds_msg - Message to/from EC for keyboard LED control.
+ * @command: Always WILCO_EC_COMMAND_KBBL.
+ * @status: Set by EC to 0 on success, 0xFF on failure.
+ * @subcmd: One of enum wilco_kbbl_subcommand.
+ * @reserved3: Should be 0.
+ * @mode: Bit flags for the mode in use; we want WILCO_KBBL_MODE_FLAG_PWM.
+ * @reserved5to8: Should be 0.
+ * @percent: Brightness in 0-100. Only meaningful in PWM mode.
+ * @reserved10to15: Should be 0.
+ */
+struct wilco_keyboard_leds_msg {
+ u8 command;
+ u8 status;
+ u8 subcmd;
+ u8 reserved3;
+ u8 mode;
+ u8 reserved5to8[4];
+ u8 percent;
+ u8 reserved10to15[6];
+} __packed;
+
+/* Send a request, get a response, and check that the response is good. */
+static int send_kbbl_msg(struct wilco_ec_device *ec,
+ struct wilco_keyboard_leds_msg *request,
+ struct wilco_keyboard_leds_msg *response)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = request;
+ msg.request_size = sizeof(*request);
+ msg.response_data = response;
+ msg.response_size = sizeof(*response);
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed sending keyboard LEDs command: %d", ret);
+ return ret;
+ }
+
+ if (response->status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response->status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;
+ request.mode = WILCO_KBBL_MODE_FLAG_PWM;
+ request.percent = brightness;
+
+ return send_kbbl_msg(ec, &request, &response);
+}
+
+static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_FEATURES;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ *exists = response.status != 0xFF;
+
+ return 0;
+}
+
+/**
+ * kbbl_init() - Initialize the state of the keyboard backlight.
+ * @ec: EC device to talk to.
+ *
+ * Get the current brightness, checking that the BIOS already initialized the
+ * backlight to PWM mode. If not in PWM mode, the current brightness is
+ * meaningless, so set the brightness to WILCO_KBBL_DEFAULT_BRIGHTNESS.
+ *
+ * Return: Final brightness of the keyboard, or negative error code on failure.
+ */
+static int kbbl_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_STATE;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
+ return response.percent;
+
+ ret = set_kbbl(ec, WILCO_KBBL_DEFAULT_BRIGHTNESS);
+ if (ret < 0)
+ return ret;
+
+ return WILCO_KBBL_DEFAULT_BRIGHTNESS;
+}
+
+static int wilco_keyboard_leds_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds *wkl =
+ container_of(cdev, struct wilco_keyboard_leds, keyboard);
+ return set_kbbl(wkl->ec, brightness);
+}
+
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds *wkl;
+ bool leds_exist;
+ int ret;
+
+ ret = kbbl_exist(ec, &leds_exist);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed checking keyboard LEDs support: %d", ret);
+ return ret;
+ }
+ if (!leds_exist)
+ return 0;
+
+ wkl = devm_kzalloc(ec->dev, sizeof(*wkl), GFP_KERNEL);
+ if (!wkl)
+ return -ENOMEM;
+
+ wkl->ec = ec;
+ wkl->keyboard.name = "platform::kbd_backlight";
+ wkl->keyboard.max_brightness = 100;
+ wkl->keyboard.flags = LED_CORE_SUSPENDRESUME;
+ wkl->keyboard.brightness_set_blocking = wilco_keyboard_leds_set;
+ ret = kbbl_init(ec);
+ if (ret < 0)
+ return ret;
+ wkl->keyboard.brightness = ret;
+
+ return devm_led_classdev_register(ec->dev, &wkl->keyboard);
+}
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 3b86a21005d3..f0d174b6bb21 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -23,6 +23,26 @@ struct boot_on_ac_request {
u8 reserved7;
} __packed;
+#define CMD_USB_CHARGE 0x39
+
+enum usb_charge_op {
+ USB_CHARGE_GET = 0,
+ USB_CHARGE_SET = 1,
+};
+
+struct usb_charge_request {
+ u8 cmd; /* Always CMD_USB_CHARGE */
+ u8 reserved;
+ u8 op; /* One of enum usb_charge_op */
+ u8 val; /* When setting, either 0 or 1 */
+} __packed;
+
+struct usb_charge_response {
+ u8 reserved;
+ u8 status; /* Set by EC to 0 on success, other value on failure */
+ u8 val; /* When getting, set by EC to either 0 or 1 */
+} __packed;
+
#define CMD_EC_INFO 0x38
enum get_ec_info_op {
CMD_GET_EC_LABEL = 0,
@@ -131,12 +151,83 @@ static ssize_t model_number_show(struct device *dev,
static DEVICE_ATTR_RO(model_number);
+static int send_usb_charge(struct wilco_ec_device *ec,
+ struct usb_charge_request *rq,
+ struct usb_charge_response *rs)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = rq;
+ msg.request_size = sizeof(*rq);
+ msg.response_data = rs;
+ msg.response_size = sizeof(*rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs->status)
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_GET;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", rs.val);
+}
+
+static ssize_t usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_SET;
+ rq.val = val;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(usb_charge);
static struct attribute *wilco_dev_attrs[] = {
&dev_attr_boot_on_ac.attr,
&dev_attr_build_date.attr,
&dev_attr_build_revision.attr,
&dev_attr_model_number.attr,
+ &dev_attr_usb_charge.attr,
&dev_attr_version.attr,
NULL,
};
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index b9d03c33d8dc..1176d543191a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -406,8 +406,8 @@ static int telem_device_remove(struct platform_device *pdev)
struct telem_device_data *dev_data = platform_get_drvdata(pdev);
cdev_device_del(&dev_data->cdev, &dev_data->dev);
- put_device(&dev_data->dev);
ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
+ put_device(&dev_data->dev);
return 0;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1041d80dde46..27d5b40fb717 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -258,7 +258,7 @@ config DELL_RBU
DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
supporting application to communicate with the BIOS regarding the new
image for the image update to take effect.
- See <file:Documentation/driver-api/dell_rbu.rst> for more details on the driver.
+ See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
config FUJITSU_LAPTOP
diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell_rbu.c
index 3691391fea6b..7d5453326b43 100644
--- a/drivers/platform/x86/dell_rbu.c
+++ b/drivers/platform/x86/dell_rbu.c
@@ -24,7 +24,7 @@
* on every time the packet data is written. This driver requires an
* application to break the BIOS image in to fixed sized packet chunks.
*
- * See Documentation/driver-api/dell_rbu.rst for more info.
+ * See Documentation/admin-guide/dell_rbu.rst for more info.
*/
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index b5a217b828dc..089b6244b716 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -13,9 +13,9 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
config ROCKCHIP_IODOMAIN
- tristate "Rockchip IO domain support"
- depends on POWER_AVS && ARCH_ROCKCHIP && OF
- help
- Say y here to enable support io domains on Rockchip SoCs. It is
- necessary for the io domain setting of the SoC to match the
- voltage supplied by the regulators.
+ tristate "Rockchip IO domain support"
+ depends on POWER_AVS && ARCH_ROCKCHIP && OF
+ help
+ Say y here to enable support for IO domains on Rockchip SoCs. It is
+ necessary for the io domain setting of the SoC to match the
+ voltage supplied by the regulators.
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 359b08596d9e..7ff48c14fae8 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -12,6 +12,7 @@
#include <linux/mfd/stm32-timers.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -19,6 +20,12 @@
#define CCMR_CHANNEL_MASK 0xFF
#define MAX_BREAKINPUT 2
+struct stm32_breakinput {
+ u32 index;
+ u32 level;
+ u32 filter;
+};
+
struct stm32_pwm {
struct pwm_chip chip;
struct mutex lock; /* protect pwm config/enable */
@@ -26,15 +33,11 @@ struct stm32_pwm {
struct regmap *regmap;
u32 max_arr;
bool have_complementary_output;
+ struct stm32_breakinput breakinputs[MAX_BREAKINPUT];
+ unsigned int num_breakinputs;
u32 capture[4] ____cacheline_aligned; /* DMA'able buffer */
};
-struct stm32_breakinput {
- u32 index;
- u32 level;
- u32 filter;
-};
-
static inline struct stm32_pwm *to_stm32_pwm_dev(struct pwm_chip *chip)
{
return container_of(chip, struct stm32_pwm, chip);
@@ -488,22 +491,19 @@ static const struct pwm_ops stm32pwm_ops = {
};
static int stm32_pwm_set_breakinput(struct stm32_pwm *priv,
- int index, int level, int filter)
+ const struct stm32_breakinput *bi)
{
- u32 bke = (index == 0) ? TIM_BDTR_BKE : TIM_BDTR_BK2E;
- int shift = (index == 0) ? TIM_BDTR_BKF_SHIFT : TIM_BDTR_BK2F_SHIFT;
- u32 mask = (index == 0) ? TIM_BDTR_BKE | TIM_BDTR_BKP | TIM_BDTR_BKF
- : TIM_BDTR_BK2E | TIM_BDTR_BK2P | TIM_BDTR_BK2F;
- u32 bdtr = bke;
+ u32 shift = TIM_BDTR_BKF_SHIFT(bi->index);
+ u32 bke = TIM_BDTR_BKE(bi->index);
+ u32 bkp = TIM_BDTR_BKP(bi->index);
+ u32 bkf = TIM_BDTR_BKF(bi->index);
+ u32 mask = bkf | bkp | bke;
+ u32 bdtr;
- /*
- * The both bits could be set since only one will be wrote
- * due to mask value.
- */
- if (level)
- bdtr |= TIM_BDTR_BKP | TIM_BDTR_BK2P;
+ bdtr = (bi->filter & TIM_BDTR_BKF_MASK) << shift | bke;
- bdtr |= (filter & TIM_BDTR_BKF_MASK) << shift;
+ if (bi->level)
+ bdtr |= bkp;
regmap_update_bits(priv->regmap, TIM_BDTR, mask, bdtr);
@@ -512,11 +512,25 @@ static int stm32_pwm_set_breakinput(struct stm32_pwm *priv,
return (bdtr & bke) ? 0 : -EINVAL;
}
-static int stm32_pwm_apply_breakinputs(struct stm32_pwm *priv,
+static int stm32_pwm_apply_breakinputs(struct stm32_pwm *priv)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < priv->num_breakinputs; i++) {
+ ret = stm32_pwm_set_breakinput(priv, &priv->breakinputs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_pwm_probe_breakinputs(struct stm32_pwm *priv,
struct device_node *np)
{
- struct stm32_breakinput breakinput[MAX_BREAKINPUT];
- int nb, ret, i, array_size;
+ int nb, ret, array_size;
+ unsigned int i;
nb = of_property_count_elems_of_size(np, "st,breakinput",
sizeof(struct stm32_breakinput));
@@ -531,20 +545,21 @@ static int stm32_pwm_apply_breakinputs(struct stm32_pwm *priv,
if (nb > MAX_BREAKINPUT)
return -EINVAL;
+ priv->num_breakinputs = nb;
array_size = nb * sizeof(struct stm32_breakinput) / sizeof(u32);
ret = of_property_read_u32_array(np, "st,breakinput",
- (u32 *)breakinput, array_size);
+ (u32 *)priv->breakinputs, array_size);
if (ret)
return ret;
- for (i = 0; i < nb && !ret; i++) {
- ret = stm32_pwm_set_breakinput(priv,
- breakinput[i].index,
- breakinput[i].level,
- breakinput[i].filter);
+ for (i = 0; i < priv->num_breakinputs; i++) {
+ if (priv->breakinputs[i].index > 1 ||
+ priv->breakinputs[i].level > 1 ||
+ priv->breakinputs[i].filter > 15)
+ return -EINVAL;
}
- return ret;
+ return stm32_pwm_apply_breakinputs(priv);
}
static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
@@ -614,7 +629,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
if (!priv->regmap || !priv->clk)
return -EINVAL;
- ret = stm32_pwm_apply_breakinputs(priv, np);
+ ret = stm32_pwm_probe_breakinputs(priv, np);
if (ret)
return ret;
@@ -647,6 +662,42 @@ static int stm32_pwm_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused stm32_pwm_suspend(struct device *dev)
+{
+ struct stm32_pwm *priv = dev_get_drvdata(dev);
+ unsigned int i;
+ u32 ccer, mask;
+
+ /* Look for active channels */
+ ccer = active_channels(priv);
+
+ for (i = 0; i < priv->chip.npwm; i++) {
+ mask = TIM_CCER_CC1E << (i * 4);
+ if (ccer & mask) {
+ dev_err(dev, "PWM %u still in use by consumer %s\n",
+ i, priv->chip.pwms[i].label);
+ return -EBUSY;
+ }
+ }
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused stm32_pwm_resume(struct device *dev)
+{
+ struct stm32_pwm *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ /* restore breakinput registers that may have been lost in low power */
+ return stm32_pwm_apply_breakinputs(priv);
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_pwm_pm_ops, stm32_pwm_suspend, stm32_pwm_resume);
+
static const struct of_device_id stm32_pwm_of_match[] = {
{ .compatible = "st,stm32-pwm", },
{ /* end node */ },
@@ -659,6 +710,7 @@ static struct platform_driver stm32_pwm_driver = {
.driver = {
.name = "stm32-pwm",
.of_match_table = stm32_pwm_of_match,
+ .pm = &stm32_pwm_pm_ops,
},
};
module_platform_driver(stm32_pwm_driver);
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 6f5840a1a82d..581d23287333 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -137,10 +137,10 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
- tmp = prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
+ tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
- tmp = prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
+ tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
}
@@ -156,7 +156,6 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
if (sun4i_pwm->data->has_prescaler_bypass) {
/* First, test without any prescaler when available */
prescaler = PWM_PRESCAL_MASK;
- pval = 1;
/*
* When not using any prescaler, the clock period in nanoseconds
* is not an integer so round it half up instead of
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 125a173bed45..4dd31dd9feea 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2755,7 +2755,7 @@ static int tsi721_probe(struct pci_dev *pdev,
{
int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
tsi_debug(INIT, &pdev->dev, "res%d %pR",
i, &pdev->resource[i]);
}
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index 33c8d1ecc988..f9e10647f94e 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -9,6 +9,8 @@
#include <linux/rio.h>
#include <linux/module.h>
+#include <linux/rio_drv.h>
+
/*
* Wrappers for all RIO configuration access functions. They just check
* alignment and call the low-level functions pointed to by rio_mport->ops.
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 2d99a3712b72..72874153972e 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_ids.h>
+#include <linux/rio_drv.h>
#include "rio.h"
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 7b07281aa0ae..3ad7817ce1f0 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -129,7 +129,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN || ARC
+ default ARCH_AGILEX || ARCH_ASPEED || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARC
help
This enables a simple reset controller driver for reset lines that
that can be asserted and deasserted by toggling bits in a contiguous,
@@ -138,10 +138,11 @@ config RESET_SIMPLE
Currently this driver supports:
- Altera SoCFPGAs
- ASPEED BMC SoCs
+ - Bitmain BM1880 SoC
+ - Realtek SoCs
- RCC reset controller in STM32 MCUs
- Allwinner SoCs
- ZTE's zx2967 family
- - Bitmain BM1880 SoC
config RESET_STM32MP157
bool "STM32MP157 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 3c9a64c1b7a8..ca1d49146f61 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -77,8 +77,10 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
* @rcdev: a pointer to the reset controller device
* @reset_spec: reset line specifier as found in the device tree
*
- * This simple translation function should be used for reset controllers
- * with 1:1 mapping, where reset lines can be indexed by number without gaps.
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`reset_controller_dev` is not set. It is useful for all reset
+ * controllers with 1:1 mapping, where reset lines can be indexed by number
+ * without gaps.
*/
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
@@ -333,7 +335,6 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
* internal state to be reset, but must be prepared for this to happen.
* Consumers must not use reset_control_reset on shared reset lines when
* reset_control_(de)assert has been used.
- * return 0.
*
* If rstc is NULL it is an optional reset and the function will just
* return 0.
@@ -392,7 +393,6 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
* After calling this function, the reset is guaranteed to be deasserted.
* Consumers must not use reset_control_reset on shared reset lines when
* reset_control_(de)assert has been used.
- * return 0.
*
* If rstc is NULL it is an optional reset and the function will just
* return 0.
diff --git a/drivers/reset/hisilicon/reset-hi3660.c b/drivers/reset/hisilicon/reset-hi3660.c
index f690b1878071..a7d4445924e5 100644
--- a/drivers/reset/hisilicon/reset-hi3660.c
+++ b/drivers/reset/hisilicon/reset-hi3660.c
@@ -56,7 +56,7 @@ static int hi3660_reset_dev(struct reset_controller_dev *rcdev,
return hi3660_reset_deassert(rcdev, idx);
}
-static struct reset_control_ops hi3660_reset_ops = {
+static const struct reset_control_ops hi3660_reset_ops = {
.reset = hi3660_reset_dev,
.assert = hi3660_reset_assert,
.deassert = hi3660_reset_deassert,
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index c53a2185a039..1dc06e08a8da 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -19,6 +19,11 @@ struct meson_audio_arb_data {
spinlock_t lock;
};
+struct meson_audio_arb_match_data {
+ const unsigned int *reset_bits;
+ unsigned int reset_num;
+};
+
#define ARB_GENERAL_BIT 31
static const unsigned int axg_audio_arb_reset_bits[] = {
@@ -30,6 +35,27 @@ static const unsigned int axg_audio_arb_reset_bits[] = {
[AXG_ARB_FRDDR_C] = 6,
};
+static const struct meson_audio_arb_match_data axg_audio_arb_match = {
+ .reset_bits = axg_audio_arb_reset_bits,
+ .reset_num = ARRAY_SIZE(axg_audio_arb_reset_bits),
+};
+
+static const unsigned int sm1_audio_arb_reset_bits[] = {
+ [AXG_ARB_TODDR_A] = 0,
+ [AXG_ARB_TODDR_B] = 1,
+ [AXG_ARB_TODDR_C] = 2,
+ [AXG_ARB_FRDDR_A] = 4,
+ [AXG_ARB_FRDDR_B] = 5,
+ [AXG_ARB_FRDDR_C] = 6,
+ [AXG_ARB_TODDR_D] = 3,
+ [AXG_ARB_FRDDR_D] = 7,
+};
+
+static const struct meson_audio_arb_match_data sm1_audio_arb_match = {
+ .reset_bits = sm1_audio_arb_reset_bits,
+ .reset_num = ARRAY_SIZE(sm1_audio_arb_reset_bits),
+};
+
static int meson_audio_arb_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
@@ -82,7 +108,13 @@ static const struct reset_control_ops meson_audio_arb_rstc_ops = {
};
static const struct of_device_id meson_audio_arb_of_match[] = {
- { .compatible = "amlogic,meson-axg-audio-arb", },
+ {
+ .compatible = "amlogic,meson-axg-audio-arb",
+ .data = &axg_audio_arb_match,
+ }, {
+ .compatible = "amlogic,meson-sm1-audio-arb",
+ .data = &sm1_audio_arb_match,
+ },
{}
};
MODULE_DEVICE_TABLE(of, meson_audio_arb_of_match);
@@ -104,10 +136,15 @@ static int meson_audio_arb_remove(struct platform_device *pdev)
static int meson_audio_arb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct meson_audio_arb_match_data *data;
struct meson_audio_arb_data *arb;
struct resource *res;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
if (!arb)
return -ENOMEM;
@@ -126,8 +163,8 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
return PTR_ERR(arb->regs);
spin_lock_init(&arb->lock);
- arb->reset_bits = axg_audio_arb_reset_bits;
- arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
+ arb->reset_bits = data->reset_bits;
+ arb->rstc.nr_resets = data->reset_num;
arb->rstc.ops = &meson_audio_arb_rstc_ops;
arb->rstc.of_node = dev->of_node;
arb->rstc.owner = THIS_MODULE;
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index 7d05d766e1ea..94d7ba88d7d2 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -15,12 +15,16 @@
#include <linux/types.h>
#include <linux/of_device.h>
-#define REG_COUNT 8
#define BITS_PER_REG 32
-#define LEVEL_OFFSET 0x7c
+
+struct meson_reset_param {
+ int reg_count;
+ int level_offset;
+};
struct meson_reset {
void __iomem *reg_base;
+ const struct meson_reset_param *param;
struct reset_controller_dev rcdev;
spinlock_t lock;
};
@@ -46,10 +50,12 @@ static int meson_reset_level(struct reset_controller_dev *rcdev,
container_of(rcdev, struct meson_reset, rcdev);
unsigned int bank = id / BITS_PER_REG;
unsigned int offset = id % BITS_PER_REG;
- void __iomem *reg_addr = data->reg_base + LEVEL_OFFSET + (bank << 2);
+ void __iomem *reg_addr;
unsigned long flags;
u32 reg;
+ reg_addr = data->reg_base + data->param->level_offset + (bank << 2);
+
spin_lock_irqsave(&data->lock, flags);
reg = readl(reg_addr);
@@ -81,10 +87,21 @@ static const struct reset_control_ops meson_reset_ops = {
.deassert = meson_reset_deassert,
};
+static const struct meson_reset_param meson8b_param = {
+ .reg_count = 8,
+ .level_offset = 0x7c,
+};
+
+static const struct meson_reset_param meson_a1_param = {
+ .reg_count = 3,
+ .level_offset = 0x40,
+};
+
static const struct of_device_id meson_reset_dt_ids[] = {
- { .compatible = "amlogic,meson8b-reset" },
- { .compatible = "amlogic,meson-gxbb-reset" },
- { .compatible = "amlogic,meson-axg-reset" },
+ { .compatible = "amlogic,meson8b-reset", .data = &meson8b_param},
+ { .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param},
+ { .compatible = "amlogic,meson-axg-reset", .data = &meson8b_param},
+ { .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ /* sentinel */ },
};
@@ -102,12 +119,16 @@ static int meson_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->reg_base))
return PTR_ERR(data->reg_base);
+ data->param = of_device_get_match_data(&pdev->dev);
+ if (!data->param)
+ return -ENODEV;
+
platform_set_drvdata(pdev, data);
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG;
+ data->rcdev.nr_resets = data->param->reg_count * BITS_PER_REG;
data->rcdev.ops = &meson_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index a45923f4df6d..2b188b3bb69a 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -141,6 +141,10 @@ static const struct of_device_id uniphier_glue_reset_match[] = {
.data = &uniphier_pro4_data,
},
{
+ .compatible = "socionext,uniphier-pro5-usb3-reset",
+ .data = &uniphier_pro4_data,
+ },
+ {
.compatible = "socionext,uniphier-pxs2-usb3-reset",
.data = &uniphier_pxs2_data,
},
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 99e75d92dada..0144075b11a6 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -64,7 +64,7 @@ static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
PM_RESET_ACTION_PULSE);
}
-static struct reset_control_ops zynqmp_reset_ops = {
+static const struct reset_control_ops zynqmp_reset_ops = {
.reset = zynqmp_reset_reset,
.assert = zynqmp_reset_assert,
.deassert = zynqmp_reset_deassert,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 1adf9f815652..d77515d8382c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -373,17 +373,6 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
-config RTC_DRV_MESON_VRTC
- tristate "Amlogic Meson Virtual RTC"
- depends on ARCH_MESON || COMPILE_TEST
- default m if ARCH_MESON
- help
- If you say yes here you will get support for the
- Virtual RTC of Amlogic SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-meson-vrtc.
-
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1337,8 +1326,6 @@ config RTC_DRV_IMXDI
config RTC_DRV_FSL_FTM_ALARM
tristate "Freescale FlexTimer alarm timer"
depends on ARCH_LAYERSCAPE || SOC_LS1021A
- select FSL_RCPM
- default y
help
For the FlexTimer in LS1012A, LS1021A, LS1028A, LS1043A, LS1046A,
LS1088A, LS208xA, we can use FTM as the wakeup source.
@@ -1360,6 +1347,17 @@ config RTC_DRV_MESON
This driver can also be built as a module, if so, the module
will be called "rtc-meson".
+config RTC_DRV_MESON_VRTC
+ tristate "Amlogic Meson Virtual RTC"
+ depends on ARCH_MESON || COMPILE_TEST
+ default m if ARCH_MESON
+ help
+ If you say yes here you will get support for the
+ Virtual RTC of Amlogic SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-meson-vrtc.
+
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
@@ -1459,6 +1457,7 @@ config RTC_DRV_PL031
config RTC_DRV_AT91RM9200
tristate "AT91RM9200 or some AT91SAM9 RTC"
depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
help
Driver for the internal RTC (Realtime Clock) module found on
Atmel AT91RM9200's and some AT91SAM9 chips. On AT91SAM9 chips
@@ -1510,9 +1509,9 @@ config RTC_DRV_PXA
depends on ARCH_PXA
select RTC_DRV_SA1100
help
- If you say Y here you will get access to the real time clock
- built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
- consisting of an SA1100 compatible RTC and the extended PXA RTC.
+ If you say Y here you will get access to the real time clock
+ built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
+ consisting of an SA1100 compatible RTC and the extended PXA RTC.
This RTC driver uses PXA RTC registers available since pxa27x
series (RDxR, RYxR) instead of legacy RCNR, RTAR.
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index c93ef33b01d3..794a4f036b99 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -70,7 +70,7 @@ static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
time64_t time = rtc_tm_to_time64(tm);
time64_t range_min = rtc->set_start_time ? rtc->start_secs :
rtc->range_min;
- time64_t range_max = rtc->set_start_time ?
+ timeu64_t range_max = rtc->set_start_time ?
(rtc->start_secs + rtc->range_max - rtc->range_min) :
rtc->range_max;
@@ -125,7 +125,7 @@ EXPORT_SYMBOL_GPL(rtc_read_time);
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
- int err;
+ int err, uie;
err = rtc_valid_tm(tm);
if (err != 0)
@@ -137,6 +137,17 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
rtc_subtract_offset(rtc, tm);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active;
+#else
+ uie = rtc->uie_rtctimer.enabled;
+#endif
+ if (uie) {
+ err = rtc_update_irq_enable(rtc, 0);
+ if (err)
+ return err;
+ }
+
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
@@ -153,6 +164,12 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
+ if (uie) {
+ err = rtc_update_irq_enable(rtc, 1);
+ if (err)
+ return err;
+ }
+
trace_rtc_set_time(rtc_tm_to_time64(tm), err);
return err;
}
@@ -528,7 +545,7 @@ EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int err;
+ int rc = 0, err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
@@ -553,7 +570,9 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
struct rtc_time tm;
ktime_t now, onesec;
- __rtc_read_time(rtc, &tm);
+ rc = __rtc_read_time(rtc, &tm);
+ if (rc)
+ goto out;
onesec = ktime_set(1, 0);
now = rtc_tm_to_ktime(tm);
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
@@ -565,6 +584,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
+
+ /*
+ * __rtc_read_time() failed; this probably means that the RTC time has
+ * never been set or, less probably, that there is a transient error on
+ * the bus. In any case, avoid enabling emulation as this will fail when
+ * reading the time too.
+ */
+ if (rc)
+ return rc;
+
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
* Enable emulation if the driver returned -EINVAL to signal that it has
@@ -581,6 +610,8 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
/**
* rtc_handle_legacy_irq - AIE, UIE and PIE event hook
* @rtc: pointer to the rtc device
+ * @num: number of occurrences of the event
+ * @mode: type of the event, RTC_AF, RTC_UF or RTC_PF
*
* This function is called when an AIE, UIE or PIE mode interrupt
* has occurred (or been emulated).
@@ -761,8 +792,8 @@ int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
/**
* rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
- * @rtc rtc device
- * @timer timer being added.
+ * @rtc: rtc device
+ * @timer: timer being added.
*
* Enqueues a timer onto the rtc devices timerqueue and sets
* the next alarm event appropriately.
@@ -821,8 +852,8 @@ static void rtc_alarm_disable(struct rtc_device *rtc)
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
- * @rtc rtc device
- * @timer timer being removed.
+ * @rtc: rtc device
+ * @timer: timer being removed.
*
* Removes a timer onto the rtc devices timerqueue and sets
* the next alarm event appropriately.
@@ -859,8 +890,7 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
/**
* rtc_timer_do_work - Expires rtc timers
- * @rtc rtc device
- * @timer timer being removed.
+ * @work: work item
*
* Expires rtc timers. Reprograms next alarm event if needed.
* Called via worktask.
@@ -993,8 +1023,8 @@ void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
/**
* rtc_read_offset - Read the amount of rtc offset in parts per billion
- * @ rtc: rtc device to be used
- * @ offset: the offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
*
* see below for details.
*
@@ -1022,8 +1052,8 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
/**
* rtc_set_offset - Adjusts the duration of the average second
- * @ rtc: rtc device to be used
- * @ offset: the offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
*
* Some rtc's allow an adjustment to the average duration of a second
* to compensate for differences in the actual clock rate due to temperature,
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index cdad6f00debf..811fe2005488 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -900,16 +900,6 @@ err:
return ret;
}
-static int abb5zes3_remove(struct i2c_client *client)
-{
- struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
-
- if (rtc_data->irq > 0)
- device_init_wakeup(&client->dev, false);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int abb5zes3_rtc_suspend(struct device *dev)
{
@@ -956,7 +946,6 @@ static struct i2c_driver abb5zes3_driver = {
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
.probe = abb5zes3_probe,
- .remove = abb5zes3_remove,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 9351bd52477e..94d7c22fc4f3 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -74,7 +74,7 @@ struct armada38x_rtc {
int irq;
bool initialized;
struct value_to_freq *val_to_freq;
- struct armada38x_rtc_data *data;
+ const struct armada38x_rtc_data *data;
};
#define ALARM1 0
@@ -501,17 +501,14 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct armada38x_rtc *rtc;
- const struct of_device_id *match;
-
- match = of_match_device(armada38x_rtc_of_match_table, &pdev->dev);
- if (!match)
- return -ENODEV;
rtc = devm_kzalloc(&pdev->dev, sizeof(struct armada38x_rtc),
GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ rtc->data = of_device_get_match_data(&pdev->dev);
+
rtc->val_to_freq = devm_kcalloc(&pdev->dev, SAMPLE_NR,
sizeof(struct value_to_freq), GFP_KERNEL);
if (!rtc->val_to_freq)
@@ -553,7 +550,6 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
*/
rtc->rtc_dev->ops = &armada38x_rtc_ops_noirq;
}
- rtc->data = (struct armada38x_rtc_data *)match->data;
/* Update RTC-MBUS bridge timing parameters */
rtc->data->update_mbus_timing(rtc);
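The armada38x change is an instance of a recurring cleanup in this series: of_device_get_match_data() replaces the open-coded of_match_device() lookup plus cast, and lets the match data stay const. A hedged sketch of the pattern, with placeholder foo_* names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_config { int variant; };			/* illustrative */
static const struct foo_config foo_cfg_a = { .variant = 1 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", .data = &foo_cfg_a },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* Returns the .data of the matching of_device_id, or NULL. */
	const struct foo_config *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;
	return 0;
}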
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 10413d803caa..10064bdabdff 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -245,7 +245,6 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
{
struct asm9260_rtc_priv *priv;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq_alarm, ret;
u32 ccr;
@@ -260,8 +259,7 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
if (irq_alarm < 0)
return irq_alarm;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_ioremap_resource(dev, res);
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
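This asm9260 hunk is the first of many identical conversions below: devm_platform_ioremap_resource() folds the platform_get_resource() + devm_ioremap_resource() pair into one call. A minimal sketch (names are placeholders):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Fetches IORESOURCE_MEM index 0 and ioremaps it; returns an
	 * ERR_PTR on any failure, so no separate NULL check of the
	 * resource is needed. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}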
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index e351d35b29a3..eacdd0637cce 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -85,14 +85,12 @@ static const struct rtc_class_ops aspeed_rtc_ops = {
static int aspeed_rtc_probe(struct platform_device *pdev)
{
struct aspeed_rtc *rtc;
- struct resource *res;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index d119c6e6353e..3b833e02a657 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -319,7 +319,6 @@ static const struct at91_rtc_config at91sam9x5_config = {
.use_shadow_imr = true,
};
-#ifdef CONFIG_OF
static const struct of_device_id at91_rtc_dt_ids[] = {
{
.compatible = "atmel,at91rm9200-rtc",
@@ -332,22 +331,6 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
}
};
MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-#endif
-
-static const struct at91_rtc_config *
-at91_rtc_get_config(struct platform_device *pdev)
-{
- const struct of_device_id *match;
-
- if (pdev->dev.of_node) {
- match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
- if (!match)
- return NULL;
- return (const struct at91_rtc_config *)match->data;
- }
-
- return &at91rm9200_config;
-}
static const struct rtc_class_ops at91_rtc_ops = {
.read_time = at91_rtc_readtime,
@@ -367,7 +350,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
struct resource *regs;
int ret = 0;
- at91_rtc_config = at91_rtc_get_config(pdev);
+ at91_rtc_config = of_device_get_match_data(&pdev->dev);
if (!at91_rtc_config)
return -ENODEV;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index bb3ba7bfe6a5..e39e89867d29 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -334,7 +334,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
*/
static int at91_rtc_probe(struct platform_device *pdev)
{
- struct resource *r;
struct sam9_rtc *rtc;
int ret, irq;
u32 mr;
@@ -358,8 +357,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtt = devm_ioremap_resource(&pdev->dev, r);
+ rtc->rtt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtt))
return PTR_ERR(rtc->rtt);
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 7744333b0f40..627037aa66a8 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -491,3 +491,4 @@ module_platform_driver(bd70528_rtc);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 RTC driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-rtc");
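This one-line alias is what makes the module autoloadable: platform devices report MODALIAS=platform:<name> to udev, so the alias must match the name the parent driver gives the child device. Convention sketch (name illustrative):

/* The string after "platform:" must equal the platform_device name
 * created by the parent driver, e.g. the "bd70528-rtc" MFD cell here. */
MODULE_ALIAS("platform:foo-rtc");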
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 3e9800f9878a..4fee57c51280 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -200,7 +200,6 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct brcmstb_waketmr *timer;
- struct resource *res;
int ret;
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
@@ -210,8 +209,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, timer);
timer->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- timer->base = devm_ioremap_resource(dev, res);
+ timer->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->base))
return PTR_ERR(timer->base);
@@ -277,6 +275,7 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev)
struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev);
unregister_reboot_notifier(&timer->reboot_notifier);
+ clk_disable_unprepare(timer->clk);
return 0;
}
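The added clk_disable_unprepare() rebalances the clock reference taken in probe, which was previously leaked on unbind. A hedged sketch of the pairing, with error handling trimmed and placeholder names:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* reference taken here */
	if (ret)
		return ret;

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	clk_disable_unprepare(platform_get_drvdata(pdev));	/* released here */
	return 0;
}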
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index 592aae23cbaf..595d5d252850 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -255,7 +255,6 @@ static const struct rtc_class_ops cdns_rtc_ops = {
static int cdns_rtc_probe(struct platform_device *pdev)
{
struct cdns_rtc *crtc;
- struct resource *res;
int ret;
unsigned long ref_clk_freq;
@@ -263,8 +262,7 @@ static int cdns_rtc_probe(struct platform_device *pdev)
if (!crtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- crtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ crtc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(crtc->regs))
return PTR_ERR(crtc->regs);
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 4ac850837153..da59917c9ee8 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -164,15 +164,13 @@ static int __init coh901331_probe(struct platform_device *pdev)
{
int ret;
struct coh901331_port *rtap;
- struct resource *res;
rtap = devm_kzalloc(&pdev->dev,
sizeof(struct coh901331_port), GFP_KERNEL);
if (!rtap)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtap->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ rtap->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtap->virtbase))
return PTR_ERR(rtap->virtbase);
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 6909e01936d9..d043d30f05bc 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -107,11 +107,7 @@ static int cros_ec_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(dev);
struct cros_ec_device *cros_ec = cros_ec_rtc->cros_ec;
int ret;
- time64_t time;
-
- time = rtc_tm_to_time64(tm);
- if (time < 0 || time > U32_MAX)
- return -EINVAL;
+ time64_t time = rtc_tm_to_time64(tm);
ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_VALUE, (u32)time);
if (ret < 0) {
@@ -348,14 +344,16 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
return ret;
}
- cros_ec_rtc->rtc = devm_rtc_device_register(&pdev->dev, DRV_NAME,
- &cros_ec_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(cros_ec_rtc->rtc)) {
- ret = PTR_ERR(cros_ec_rtc->rtc);
- dev_err(&pdev->dev, "failed to register rtc device\n");
+ cros_ec_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(cros_ec_rtc->rtc))
+ return PTR_ERR(cros_ec_rtc->rtc);
+
+ cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
+ cros_ec_rtc->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(cros_ec_rtc->rtc);
+ if (ret)
return ret;
- }
/* Get RTC events from the EC. */
cros_ec_rtc->notifier.notifier_call = cros_ec_rtc_event;
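The cros-ec conversion moves from the one-shot devm_rtc_device_register() to the allocate/configure/register flow so the driver can declare its time range before registration; the rtc core then range-checks times itself, which is why the hand-rolled U32_MAX test in set_time could be dropped. A sketch of the flow (foo_* names are placeholders):

#include <linux/rtc.h>

static const struct rtc_class_ops foo_rtc_ops;	/* placeholder ops */

static int foo_rtc_setup(struct device *dev)
{
	struct rtc_device *rtc = devm_rtc_allocate_device(dev);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &foo_rtc_ops;
	rtc->range_max = U32_MAX;	/* hardware keeps a 32-bit seconds counter */

	/* With the range declared, the core rejects out-of-range times
	 * before ->set_time() is ever called. */
	return rtc_register_device(rtc);
}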
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 15908d51b1cb..046b1d4c3dae 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,6 +483,9 @@ static int da9063_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->uie_unsupported = 1;
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
+ if (irq_alarm < 0)
+ return irq_alarm;
+
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index d8e0db2e7fc6..390b7351e0fe 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct davinci_rtc *davinci_rtc;
- struct resource *res;
int ret = 0;
davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL);
@@ -480,8 +479,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
if (davinci_rtc->irq < 0)
return davinci_rtc->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- davinci_rtc->base = devm_ioremap_resource(dev, res);
+ davinci_rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(davinci_rtc->base))
return PTR_ERR(davinci_rtc->base);
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 0aecc3f8e721..200d85b01e8b 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -175,7 +175,6 @@ static irqreturn_t dc_rtc_irq(int irq, void *dev_id)
static int __init dc_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct dc_rtc *rtc;
int irq, ret;
@@ -183,8 +182,7 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->regs))
return PTR_ERR(rtc->regs);
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index b225bcfef50b..7eeb3f359de8 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -137,7 +137,6 @@ static const struct rtc_class_ops ds1216_rtc_ops = {
static int __init ds1216_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct ds1216_priv *priv;
u8 dummy[8];
@@ -147,8 +146,7 @@ static int __init ds1216_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->ioaddr))
return PTR_ERR(priv->ioaddr);
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index a06508b6c404..7acf849d4902 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -323,15 +323,13 @@ static const struct rtc_class_ops ds1286_ops = {
static int ds1286_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- struct resource *res;
struct ds1286_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(struct ds1286_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->rtcregs = devm_ioremap_resource(&pdev->dev, res);
+ priv->rtcregs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->rtcregs))
return PTR_ERR(priv->rtcregs);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 4faa24c88af5..b3de6d2e680a 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -15,8 +15,6 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
-#define DRV_NAME "rtc-ds1302"
-
#define RTC_CMD_READ 0x81 /* Read command */
#define RTC_CMD_WRITE 0x80 /* Write command */
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index fa6de31d5793..d21004a68ee0 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -78,42 +78,19 @@ struct ds1343_priv {
struct spi_device *spi;
struct rtc_device *rtc;
struct regmap *map;
- struct mutex mutex;
- unsigned int irqen;
int irq;
- int alarm_sec;
- int alarm_min;
- int alarm_hour;
- int alarm_mday;
};
-static int ds1343_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
-#ifdef RTC_SET_CHARGE
- case RTC_SET_CHARGE:
- {
- int val;
-
- if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
- return -EFAULT;
-
- return regmap_write(priv->map, DS1343_TRICKLE_REG, val);
- }
- break;
-#endif
- }
-
- return -ENOIOCTLCMD;
-}
-
static ssize_t ds1343_show_glitchfilter(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
int glitch_filt_status, data;
+ int res;
- regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ res = regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ if (res)
+ return res;
glitch_filt_status = !!(data & DS1343_EGFIL);
@@ -127,21 +104,19 @@ static ssize_t ds1343_store_glitchfilter(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int data;
-
- regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
+ int data = 0;
+ int res;
if (strncmp(buf, "enabled", 7) == 0)
- data |= DS1343_EGFIL;
-
- else if (strncmp(buf, "disabled", 8) == 0)
- data &= ~(DS1343_EGFIL);
-
- else
+ data = DS1343_EGFIL;
+ else if (strncmp(buf, "disabled", 8))
return -EINVAL;
- regmap_write(priv->map, DS1343_CONTROL_REG, data);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_EGFIL, data);
+ if (res)
+ return res;
return count;
}
@@ -168,11 +143,13 @@ static int ds1343_nvram_read(void *priv, unsigned int off, void *val,
static ssize_t ds1343_show_tricklecharger(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int data;
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
+ int res, data;
char *diodes = "disabled", *resistors = " ";
- regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
+ res = regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
+ if (res)
+ return res;
if ((data & 0xf0) == DS1343_TRICKLE_MAGIC) {
switch (data & 0x0c) {
@@ -209,28 +186,15 @@ static ssize_t ds1343_show_tricklecharger(struct device *dev,
static DEVICE_ATTR(trickle_charger, S_IRUGO, ds1343_show_tricklecharger, NULL);
-static int ds1343_sysfs_register(struct device *dev)
-{
- int err;
-
- err = device_create_file(dev, &dev_attr_glitch_filter);
- if (err)
- return err;
-
- err = device_create_file(dev, &dev_attr_trickle_charger);
- if (!err)
- return 0;
-
- device_remove_file(dev, &dev_attr_glitch_filter);
-
- return err;
-}
+static struct attribute *ds1343_attrs[] = {
+ &dev_attr_glitch_filter.attr,
+ &dev_attr_trickle_charger.attr,
+ NULL
+};
-static void ds1343_sysfs_unregister(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_glitch_filter);
- device_remove_file(dev, &dev_attr_trickle_charger);
-}
+static const struct attribute_group ds1343_attr_group = {
+ .attrs = ds1343_attrs,
+};
static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
{
@@ -256,144 +220,78 @@ static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
static int ds1343_set_time(struct device *dev, struct rtc_time *dt)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res;
-
- res = regmap_write(priv->map, DS1343_SECONDS_REG,
- bin2bcd(dt->tm_sec));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_MINUTES_REG,
- bin2bcd(dt->tm_min));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_HOURS_REG,
- bin2bcd(dt->tm_hour) & 0x3F);
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_DAY_REG,
- bin2bcd(dt->tm_wday + 1));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_DATE_REG,
- bin2bcd(dt->tm_mday));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_MONTH_REG,
- bin2bcd(dt->tm_mon + 1));
- if (res)
- return res;
-
- dt->tm_year %= 100;
-
- res = regmap_write(priv->map, DS1343_YEAR_REG,
- bin2bcd(dt->tm_year));
- if (res)
- return res;
-
- return 0;
+ u8 buf[7];
+
+ buf[0] = bin2bcd(dt->tm_sec);
+ buf[1] = bin2bcd(dt->tm_min);
+ buf[2] = bin2bcd(dt->tm_hour) & 0x3F;
+ buf[3] = bin2bcd(dt->tm_wday + 1);
+ buf[4] = bin2bcd(dt->tm_mday);
+ buf[5] = bin2bcd(dt->tm_mon + 1);
+ buf[6] = bin2bcd(dt->tm_year - 100);
+
+ return regmap_bulk_write(priv->map, DS1343_SECONDS_REG,
+ buf, sizeof(buf));
}
-static int ds1343_update_alarm(struct device *dev)
+static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- unsigned int control, stat;
unsigned char buf[4];
- int res = 0;
+ unsigned int val;
+ int res;
- res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
- if (res)
- return res;
+ if (priv->irq <= 0)
+ return -EINVAL;
- res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
+ res = regmap_read(priv->map, DS1343_STATUS_REG, &val);
if (res)
return res;
- control &= ~(DS1343_A0IE);
- stat &= ~(DS1343_IRQF0);
-
- res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
- if (res)
- return res;
+ alarm->pending = !!(val & DS1343_IRQF0);
- res = regmap_write(priv->map, DS1343_STATUS_REG, stat);
+ res = regmap_read(priv->map, DS1343_CONTROL_REG, &val);
if (res)
return res;
+ alarm->enabled = !!(val & DS1343_A0IE);
- buf[0] = priv->alarm_sec < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_sec) & 0x7F;
- buf[1] = priv->alarm_min < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_min) & 0x7F;
- buf[2] = priv->alarm_hour < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_hour) & 0x3F;
- buf[3] = priv->alarm_mday < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_mday) & 0x7F;
-
- res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
+ res = regmap_bulk_read(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
if (res)
return res;
- if (priv->irqen) {
- control |= DS1343_A0IE;
- res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
- }
+ alarm->time.tm_sec = bcd2bin(buf[0]) & 0x7f;
+ alarm->time.tm_min = bcd2bin(buf[1]) & 0x7f;
+ alarm->time.tm_hour = bcd2bin(buf[2]) & 0x3f;
+ alarm->time.tm_mday = bcd2bin(buf[3]) & 0x3f;
- return res;
+ return 0;
}
-static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
+ unsigned char buf[4];
int res = 0;
- unsigned int stat;
if (priv->irq <= 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
-
- res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG, DS1343_A0IE, 0);
if (res)
- goto out;
-
- alarm->enabled = !!(priv->irqen & RTC_AF);
- alarm->pending = !!(stat & DS1343_IRQF0);
-
- alarm->time.tm_sec = priv->alarm_sec < 0 ? 0 : priv->alarm_sec;
- alarm->time.tm_min = priv->alarm_min < 0 ? 0 : priv->alarm_min;
- alarm->time.tm_hour = priv->alarm_hour < 0 ? 0 : priv->alarm_hour;
- alarm->time.tm_mday = priv->alarm_mday < 0 ? 0 : priv->alarm_mday;
-
-out:
- mutex_unlock(&priv->mutex);
- return res;
-}
-
-static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res = 0;
-
- if (priv->irq <= 0)
- return -EINVAL;
+ return res;
- mutex_lock(&priv->mutex);
+ buf[0] = bin2bcd(alarm->time.tm_sec);
+ buf[1] = bin2bcd(alarm->time.tm_min);
+ buf[2] = bin2bcd(alarm->time.tm_hour);
+ buf[3] = bin2bcd(alarm->time.tm_mday);
- priv->alarm_sec = alarm->time.tm_sec;
- priv->alarm_min = alarm->time.tm_min;
- priv->alarm_hour = alarm->time.tm_hour;
- priv->alarm_mday = alarm->time.tm_mday;
+ res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
+ if (res)
+ return res;
if (alarm->enabled)
- priv->irqen |= RTC_AF;
-
- res = ds1343_update_alarm(dev);
-
- mutex_unlock(&priv->mutex);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, DS1343_A0IE);
return res;
}
@@ -401,32 +299,21 @@ static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int ds1343_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res = 0;
if (priv->irq <= 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
-
- if (enabled)
- priv->irqen |= RTC_AF;
- else
- priv->irqen &= ~RTC_AF;
-
- res = ds1343_update_alarm(dev);
-
- mutex_unlock(&priv->mutex);
-
- return res;
+ return regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, enabled ? DS1343_A0IE : 0);
}
static irqreturn_t ds1343_thread(int irq, void *dev_id)
{
struct ds1343_priv *priv = dev_id;
- unsigned int stat, control;
+ unsigned int stat;
int res = 0;
- mutex_lock(&priv->mutex);
+ rtc_lock(priv->rtc);
res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
if (res)
@@ -436,23 +323,18 @@ static irqreturn_t ds1343_thread(int irq, void *dev_id)
stat &= ~DS1343_IRQF0;
regmap_write(priv->map, DS1343_STATUS_REG, stat);
- res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
- if (res)
- goto out;
-
- control &= ~DS1343_A0IE;
- regmap_write(priv->map, DS1343_CONTROL_REG, control);
-
rtc_update_irq(priv->rtc, 1, RTC_AF | RTC_IRQF);
+
+ regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, 0);
}
out:
- mutex_unlock(&priv->mutex);
+ rtc_unlock(priv->rtc);
return IRQ_HANDLED;
}
static const struct rtc_class_ops ds1343_rtc_ops = {
- .ioctl = ds1343_ioctl,
.read_time = ds1343_read_time,
.set_time = ds1343_set_time,
.read_alarm = ds1343_read_alarm,
@@ -481,7 +363,6 @@ static int ds1343_probe(struct spi_device *spi)
return -ENOMEM;
priv->spi = spi;
- mutex_init(&priv->mutex);
/* RTC DS1347 works in spi mode 3 and
* its chip select is active high
@@ -520,6 +401,13 @@ static int ds1343_probe(struct spi_device *spi)
priv->rtc->nvram_old_abi = true;
priv->rtc->ops = &ds1343_rtc_ops;
+ priv->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ priv->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ res = rtc_add_group(priv->rtc, &ds1343_attr_group);
+ if (res)
+ dev_err(&spi->dev,
+ "unable to create sysfs entries for rtc ds1343\n");
res = rtc_register_device(priv->rtc);
if (res)
@@ -544,31 +432,12 @@ static int ds1343_probe(struct spi_device *spi)
}
}
- res = ds1343_sysfs_register(&spi->dev);
- if (res)
- dev_err(&spi->dev,
- "unable to create sysfs entries for rtc ds1343\n");
-
return 0;
}
static int ds1343_remove(struct spi_device *spi)
{
- struct ds1343_priv *priv = spi_get_drvdata(spi);
-
- if (spi->irq) {
- mutex_lock(&priv->mutex);
- priv->irqen &= ~RTC_AF;
- mutex_unlock(&priv->mutex);
-
- dev_pm_clear_wake_irq(&spi->dev);
- device_init_wakeup(&spi->dev, false);
- devm_free_irq(&spi->dev, spi->irq, priv);
- }
-
- spi_set_drvdata(spi, NULL);
-
- ds1343_sysfs_unregister(&spi->dev);
+ dev_pm_clear_wake_irq(&spi->dev);
return 0;
}
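Two moves recur throughout the ds1343 rework: open-coded read-modify-write sequences collapse into regmap_update_bits(), and the driver-private mutex gives way to the rtc core's ops lock in the threaded IRQ handler. A hedged sketch of both, reusing the patch's own DS1343_* register macros but not its exact code:

#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/rtc.h>

struct foo_priv {			/* stand-in for ds1343_priv */
	struct regmap *map;
	struct rtc_device *rtc;
};

static int foo_alarm_irq_enable(struct foo_priv *priv, unsigned int enabled)
{
	/* One locked read-modify-write replaces regmap_read() + write(). */
	return regmap_update_bits(priv->map, DS1343_CONTROL_REG,
				  DS1343_A0IE, enabled ? DS1343_A0IE : 0);
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	rtc_lock(priv->rtc);		/* core ops lock, not a private mutex */
	/* ... check/ack DS1343_IRQF0, rtc_update_irq(), mask DS1343_A0IE ... */
	rtc_unlock(priv->rtc);

	return IRQ_HANDLED;
}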
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index d392a7bfdd1c..7025cf3fb9f8 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -26,9 +26,15 @@
#define DS1347_DAY_REG 0x0B
#define DS1347_YEAR_REG 0x0D
#define DS1347_CONTROL_REG 0x0F
+#define DS1347_CENTURY_REG 0x13
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
+#define DS1347_WP_BIT BIT(7)
+
+#define DS1347_NEOSC_BIT BIT(7)
+#define DS1347_OSF_BIT BIT(2)
+
static const struct regmap_range ds1347_ranges[] = {
{
.range_min = DS1347_SECONDS_REG,
@@ -43,35 +49,54 @@ static const struct regmap_access_table ds1347_access_table = {
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
- struct spi_device *spi = to_spi_device(dev);
- struct regmap *map;
- int err;
+ struct regmap *map = dev_get_drvdata(dev);
+ unsigned int status, century, secs;
unsigned char buf[8];
+ int err;
- map = spi_get_drvdata(spi);
-
- err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
+ err = regmap_read(map, DS1347_STATUS_REG, &status);
if (err)
return err;
+ if (status & DS1347_OSF_BIT)
+ return -EINVAL;
+
+ do {
+ err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
+ if (err)
+ return err;
+
+ err = regmap_read(map, DS1347_CENTURY_REG, &century);
+ if (err)
+ return err;
+
+ err = regmap_read(map, DS1347_SECONDS_REG, &secs);
+ if (err)
+ return err;
+ } while (buf[0] != secs);
+
dt->tm_sec = bcd2bin(buf[0]);
- dt->tm_min = bcd2bin(buf[1]);
+ dt->tm_min = bcd2bin(buf[1] & 0x7f);
dt->tm_hour = bcd2bin(buf[2] & 0x3F);
dt->tm_mday = bcd2bin(buf[3]);
dt->tm_mon = bcd2bin(buf[4]) - 1;
dt->tm_wday = bcd2bin(buf[5]) - 1;
- dt->tm_year = bcd2bin(buf[6]) + 100;
+ dt->tm_year = (bcd2bin(century) * 100) + bcd2bin(buf[6]) - 1900;
return 0;
}
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
- struct spi_device *spi = to_spi_device(dev);
- struct regmap *map;
+ struct regmap *map = dev_get_drvdata(dev);
+ unsigned int century;
unsigned char buf[8];
+ int err;
- map = spi_get_drvdata(spi);
+ err = regmap_update_bits(map, DS1347_STATUS_REG,
+ DS1347_NEOSC_BIT, DS1347_NEOSC_BIT);
+ if (err)
+ return err;
buf[0] = bin2bcd(dt->tm_sec);
buf[1] = bin2bcd(dt->tm_min);
@@ -79,16 +104,20 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
buf[3] = bin2bcd(dt->tm_mday);
buf[4] = bin2bcd(dt->tm_mon + 1);
buf[5] = bin2bcd(dt->tm_wday + 1);
+ buf[6] = bin2bcd(dt->tm_year % 100);
+ buf[7] = bin2bcd(0x00);
- /* year in linux is from 1900 i.e in range of 100
- in rtc it is from 00 to 99 */
- dt->tm_year = dt->tm_year % 100;
+ err = regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
+ if (err)
+ return err;
- buf[6] = bin2bcd(dt->tm_year);
- buf[7] = bin2bcd(0x00);
+ century = (dt->tm_year / 100) + 19;
+ err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century));
+ if (err)
+ return err;
- /* write the rtc settings */
- return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
+ return regmap_update_bits(map, DS1347_STATUS_REG,
+ DS1347_NEOSC_BIT | DS1347_OSF_BIT, 0);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -101,8 +130,7 @@ static int ds1347_probe(struct spi_device *spi)
struct rtc_device *rtc;
struct regmap_config config;
struct regmap *map;
- unsigned int data;
- int res;
+ int err;
memset(&config, 0, sizeof(config));
config.reg_bits = 8;
@@ -125,36 +153,20 @@ static int ds1347_probe(struct spi_device *spi)
spi_set_drvdata(spi, map);
- /* RTC Settings */
- res = regmap_read(map, DS1347_SECONDS_REG, &data);
- if (res)
- return res;
-
/* Disable the write protect of rtc */
- regmap_read(map, DS1347_CONTROL_REG, &data);
- data = data & ~(1<<7);
- regmap_write(map, DS1347_CONTROL_REG, data);
-
- /* Enable the oscillator , disable the oscillator stop flag,
- and glitch filter to reduce current consumption */
- regmap_read(map, DS1347_STATUS_REG, &data);
- data = data & 0x1B;
- regmap_write(map, DS1347_STATUS_REG, data);
-
- /* display the settings */
- regmap_read(map, DS1347_CONTROL_REG, &data);
- dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
-
- regmap_read(map, DS1347_STATUS_REG, &data);
- dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
-
- rtc = devm_rtc_device_register(&spi->dev, "ds1347",
- &ds1347_rtc_ops, THIS_MODULE);
+ err = regmap_update_bits(map, DS1347_CONTROL_REG, DS1347_WP_BIT, 0);
+ if (err)
+ return err;
+ rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- return 0;
+ rtc->ops = &ds1347_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ rtc->range_max = RTC_TIMESTAMP_END_9999;
+
+ return rtc_register_device(rtc);
}
static struct spi_driver ds1347_driver = {
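A quick check of the century arithmetic above, since struct rtc_time counts years from 1900 (a worked example, not code from the patch):

/* Setting 2019: tm_year = 119
 *	year register    = bin2bcd(119 % 100)     -> 0x19
 *	century register = bin2bcd(119/100 + 19)  -> 0x20
 * Reading back:
 *	tm_year = bcd2bin(0x20) * 100 + bcd2bin(0x19) - 1900
 *	        = 2000 + 19 - 1900 = 119	(round-trips correctly)
 */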
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 6e96916c41ff..6e9ddcd03992 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -439,14 +439,13 @@ static void ds1374_wdt_ping(void)
static void ds1374_wdt_disable(void)
{
- int ret = -ENOIOCTLCMD;
int cr;
cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
/* Disable watchdog timer */
cr &= ~DS1374_REG_CR_WACE;
- ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+ i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
}
/*
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index b6a477519280..a63872c4c76d 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -414,7 +414,6 @@ static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_plat_data *pdata;
int ret = 0;
struct nvmem_config ds1511_nvmem_cfg = {
@@ -431,8 +430,7 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+ ds1511_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ds1511_base))
return PTR_ERR(ds1511_base);
pdata->ioaddr = ds1511_base;
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 219d6b520a69..cdf5e05b9489 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -249,7 +249,6 @@ static int ds1553_nvram_write(void *priv, unsigned int pos, void *val,
static int ds1553_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int cen, sec;
struct rtc_plat_data *pdata;
void __iomem *ioaddr;
@@ -268,8 +267,7 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 184e4a3e2bef..56c670af2e50 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -31,7 +31,10 @@
/* ----------------------------------------------------------------------- */
-/* Standard read/write functions if platform does not provide overrides */
+/*
+ * Standard read/write functions:
+ * all registers are mapped in the CPU address space
+ */
/**
* ds1685_read - read a value from an rtc register.
@@ -59,6 +62,35 @@ ds1685_write(struct ds1685_priv *rtc, int reg, u8 value)
}
/* ----------------------------------------------------------------------- */
+/*
+ * Indirect read/write functions
+ * access happens via an address and a data register mapped into the CPU address space
+ */
+
+/**
+ * ds1685_indirect_read - read a value from an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to read.
+ */
+static u8
+ds1685_indirect_read(struct ds1685_priv *rtc, int reg)
+{
+ writeb(reg, rtc->regs);
+ return readb(rtc->data);
+}
+
+/**
+ * ds1685_indirect_write - write a value to an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to write.
+ * @value: value to write to the register.
+ */
+static void
+ds1685_indirect_write(struct ds1685_priv *rtc, int reg, u8 value)
+{
+ writeb(reg, rtc->regs);
+ writeb(value, rtc->data);
+}
/* ----------------------------------------------------------------------- */
/* Inlined functions */
@@ -229,7 +261,7 @@ static int
ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 ctrlb, century;
+ u8 century;
u8 seconds, minutes, hours, wday, mday, month, years;
/* Fetch the time info from the RTC registers. */
@@ -242,7 +274,6 @@ ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
month = rtc->read(rtc, RTC_MONTH);
years = rtc->read(rtc, RTC_YEAR);
century = rtc->read(rtc, RTC_CENTURY);
- ctrlb = rtc->read(rtc, RTC_CTRL_B);
ds1685_rtc_end_data_access(rtc);
/* bcd2bin if needed, perform fixups, and store to rtc_time. */
@@ -723,7 +754,7 @@ static int
ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 ctrla, ctrlb, ctrlc, ctrld, ctrl4a, ctrl4b, ssn[8];
+ u8 ctrla, ctrlb, ctrld, ctrl4a, ctrl4b, ssn[8];
char *model;
/* Read all the relevant data from the control registers. */
@@ -731,7 +762,6 @@ ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
ds1685_rtc_get_ssn(rtc, ssn);
ctrla = rtc->read(rtc, RTC_CTRL_A);
ctrlb = rtc->read(rtc, RTC_CTRL_B);
- ctrlc = rtc->read(rtc, RTC_CTRL_C);
ctrld = rtc->read(rtc, RTC_CTRL_D);
ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
@@ -1009,7 +1039,7 @@ ds1685_rtc_sysfs_serial_show(struct device *dev,
}
static DEVICE_ATTR(serial, S_IRUGO, ds1685_rtc_sysfs_serial_show, NULL);
-/**
+/*
* struct ds1685_rtc_sysfs_misc_attrs - list for misc RTC features.
*/
static struct attribute*
@@ -1020,7 +1050,7 @@ ds1685_rtc_sysfs_misc_attrs[] = {
NULL,
};
-/**
+/*
* struct ds1685_rtc_sysfs_misc_grp - attr group for misc RTC features.
*/
static const struct attribute_group
@@ -1040,7 +1070,6 @@ static int
ds1685_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc_dev;
- struct resource *res;
struct ds1685_priv *rtc;
struct ds1685_rtc_platform_data *pdata;
u8 ctrla, ctrlb, hours;
@@ -1063,35 +1092,29 @@ ds1685_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- /*
- * Allocate/setup any IORESOURCE_MEM resources, if required. Not all
- * platforms put the RTC in an easy-access place. Like the SGI Octane,
- * which attaches the RTC to a "ByteBus", hooked to a SuperIO chip
- * that sits behind the IOC3 PCI metadevice.
- */
- if (pdata->alloc_io_resources) {
- /* Get the platform resources. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
- rtc->size = resource_size(res);
-
- /* Request a memory region. */
- /* XXX: mmio-only for now. */
- if (!devm_request_mem_region(&pdev->dev, res->start, rtc->size,
- pdev->name))
- return -EBUSY;
-
- /*
- * Set the base address for the rtc, and ioremap its
- * registers.
- */
- rtc->baseaddr = res->start;
- rtc->regs = devm_ioremap(&pdev->dev, res->start, rtc->size);
- if (!rtc->regs)
- return -ENOMEM;
+ /* Setup resources and access functions */
+ switch (pdata->access_type) {
+ case ds1685_reg_direct:
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ rtc->read = ds1685_read;
+ rtc->write = ds1685_write;
+ break;
+ case ds1685_reg_indirect:
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ rtc->data = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(rtc->data))
+ return PTR_ERR(rtc->data);
+ rtc->read = ds1685_indirect_read;
+ rtc->write = ds1685_indirect_write;
+ break;
}
- rtc->alloc_io_resources = pdata->alloc_io_resources;
+
+ if (!rtc->read || !rtc->write)
+ return -ENXIO;
/* Get the register step size. */
if (pdata->regstep > 0)
@@ -1099,24 +1122,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
else
rtc->regstep = 1;
- /* Platform read function, else default if mmio setup */
- if (pdata->plat_read)
- rtc->read = pdata->plat_read;
- else
- if (pdata->alloc_io_resources)
- rtc->read = ds1685_read;
- else
- return -ENXIO;
-
- /* Platform write function, else default if mmio setup */
- if (pdata->plat_write)
- rtc->write = pdata->plat_write;
- else
- if (pdata->alloc_io_resources)
- rtc->write = ds1685_write;
- else
- return -ENXIO;
-
/* Platform pre-shutdown function, if defined. */
if (pdata->plat_prepare_poweroff)
rtc->prepare_poweroff = pdata->plat_prepare_poweroff;
@@ -1271,7 +1276,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
/* See if the platform doesn't support UIE. */
if (pdata->uie_unsupported)
rtc_dev->uie_unsupported = 1;
- rtc->uie_unsupported = pdata->uie_unsupported;
rtc->dev = rtc_dev;
@@ -1351,7 +1355,7 @@ ds1685_rtc_remove(struct platform_device *pdev)
return 0;
}
-/**
+/*
* ds1685_rtc_driver - rtc driver properties.
*/
static struct platform_driver ds1685_rtc_driver = {
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 77cca1392253..9f176bce48ba 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -71,7 +71,7 @@ static int em3027_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_hour = bcd2bin(buf[2]);
tm->tm_mday = bcd2bin(buf[3]);
tm->tm_wday = bcd2bin(buf[4]);
- tm->tm_mon = bcd2bin(buf[5]);
+ tm->tm_mon = bcd2bin(buf[5]) - 1;
tm->tm_year = bcd2bin(buf[6]) + 100;
return 0;
@@ -94,7 +94,7 @@ static int em3027_set_time(struct device *dev, struct rtc_time *tm)
buf[3] = bin2bcd(tm->tm_hour);
buf[4] = bin2bcd(tm->tm_mday);
buf[5] = bin2bcd(tm->tm_wday);
- buf[6] = bin2bcd(tm->tm_mon);
+ buf[6] = bin2bcd(tm->tm_mon + 1);
buf[7] = bin2bcd(tm->tm_year % 100);
/* write time/date registers */
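The em3027 fix is the classic month-offset bug: struct rtc_time months run 0-11 while the chip's BCD month register runs 1-12, so each direction needs an adjustment:

tm->tm_mon = bcd2bin(buf[5]) - 1;	/* chip 0x01..0x12 -> 0..11 */
buf[6] = bin2bcd(tm->tm_mon + 1);	/* 0..11 -> chip 0x01..0x12 */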
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1766496385fe..8ec9ea1ca72e 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -122,15 +122,13 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
static int ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
- struct resource *res;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
if (!ep93xx_rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ ep93xx_rtc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_rtc->mmio_base))
return PTR_ERR(ep93xx_rtc->mmio_base);
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 8df2075af9a2..9e6e994cce99 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -180,10 +180,7 @@ static int ftm_rtc_alarm_irq_enable(struct device *dev,
*/
static int ftm_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- rtc_time_to_tm(ts64.tv_sec, tm);
+ rtc_time64_to_tm(ktime_get_real_seconds(), tm);
return 0;
}
@@ -206,16 +203,14 @@ static int ftm_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
*/
static int ftm_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- struct rtc_time tm;
- unsigned long now, alm_time, cycle;
+ time64_t alm_time;
+ unsigned long long cycle;
struct ftm_rtc *rtc = dev_get_drvdata(dev);
- ftm_rtc_read_time(dev, &tm);
- rtc_tm_to_time(&tm, &now);
- rtc_tm_to_time(&alm->time, &alm_time);
+ alm_time = rtc_tm_to_time64(&alm->time);
ftm_clean_alarm(rtc);
- cycle = (alm_time - now) * rtc->alarm_freq;
+ cycle = (alm_time - ktime_get_real_seconds()) * rtc->alarm_freq;
if (cycle > MAX_COUNT_VAL) {
pr_err("Out of alarm range {0~262} seconds.\n");
return -ERANGE;
@@ -248,7 +243,6 @@ static const struct rtc_class_ops ftm_rtc_ops = {
static int ftm_rtc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
int irq;
int ret;
struct ftm_rtc *rtc;
@@ -265,13 +259,7 @@ static int ftm_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "cannot get resource for rtc\n");
- return -ENODEV;
- }
-
- rtc->base = devm_ioremap_resource(&pdev->dev, r);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
dev_err(&pdev->dev, "cannot ioremap resource for rtc\n");
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 1a3420ee6a4d..cb6b0ad7ec3f 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -165,7 +165,6 @@ static const struct rtc_class_ops goldfish_rtc_ops = {
static int goldfish_rtc_probe(struct platform_device *pdev)
{
struct goldfish_rtc *rtcdrv;
- struct resource *r;
int err;
rtcdrv = devm_kzalloc(&pdev->dev, sizeof(*rtcdrv), GFP_KERNEL);
@@ -173,12 +172,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, rtcdrv);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- rtcdrv->base = devm_ioremap_resource(&pdev->dev, r);
+ rtcdrv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtcdrv->base))
return -ENODEV;
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 3089645e0ce8..18023e472cbc 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -307,7 +307,6 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_rtc *rtc;
- struct resource *mem;
const struct platform_device_id *id = platform_get_device_id(pdev);
const struct of_device_id *of_id = of_match_device(
jz4740_rtc_of_match, &pdev->dev);
@@ -326,8 +325,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (rtc->irq < 0)
return -ENOENT;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, mem);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index a8bb15606ec8..00ef16ba9480 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -194,15 +194,13 @@ static const struct rtc_class_ops lpc24xx_rtc_ops = {
static int lpc24xx_rtc_probe(struct platform_device *pdev)
{
struct lpc24xx_rtc *rtc;
- struct resource *res;
int irq, ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtc_base))
return PTR_ERR(rtc->rtc_base);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index ac393230e592..15d8abda81fe 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -185,7 +185,6 @@ static const struct rtc_class_ops lpc32xx_rtc_ops = {
static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct lpc32xx_rtc *rtc;
int err;
u32 tmp;
@@ -194,8 +193,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc))
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtc_base))
return PTR_ERR(rtc->rtc_base);
@@ -266,16 +264,6 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int lpc32xx_rtc_remove(struct platform_device *pdev)
-{
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
-
- if (rtc->irq >= 0)
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int lpc32xx_rtc_suspend(struct device *dev)
{
@@ -357,7 +345,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
- .remove = lpc32xx_rtc_remove,
.driver = {
.name = "rtc-lpc32xx",
.pm = LPC32XX_RTC_PM_OPS,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f9fa4f068de3..9b70b371bd0c 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -235,9 +235,6 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[8];
int err, flags;
- if (tm->tm_year < 100 || tm->tm_year > 199)
- return -EINVAL;
-
buf[M41T80_REG_SSEC] = 0;
buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec);
buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min);
@@ -705,7 +702,6 @@ static ssize_t wdt_read(struct file *file, char __user *buf,
/**
* wdt_ioctl:
- * @inode: inode of the device
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
@@ -926,6 +922,8 @@ static int m41t80_probe(struct i2c_client *client,
}
m41t80_data->rtc->ops = &m41t80_rtc_ops;
+ m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq <= 0) {
/* We cannot support UIE mode if we do not have an IRQ line */
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 59b54ed9b841..75a0e73071d8 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -218,7 +218,6 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
- struct resource *res;
unsigned char reg;
int err;
struct nvmem_config m48t86_nvmem_cfg = {
@@ -235,17 +234,11 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- info->index_reg = devm_ioremap_resource(&pdev->dev, res);
+ info->index_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->index_reg))
return PTR_ERR(info->index_reg);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
- info->data_reg = devm_ioremap_resource(&pdev->dev, res);
+ info->data_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(info->data_reg))
return PTR_ERR(info->data_reg);
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 2ecd8752b088..df2829dd55ad 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -172,7 +172,20 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_X86
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0x17) ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
+ RTC_FREQ_SELECT);
+ save_freq_select &= ~RTC_DIV_RESET2;
+ } else
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
+ RTC_FREQ_SELECT);
+#else
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
+#endif
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index e08b981dfc21..47ebcf834cc2 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -131,7 +131,7 @@ static u32 meson_rtc_get_data(struct meson_rtc *rtc)
static int meson_rtc_get_bus(struct meson_rtc *rtc)
{
- int ret, retries = 3;
+ int ret, retries;
u32 val;
/* prepare bus for transfers, set all lines low */
@@ -292,7 +292,6 @@ static int meson_rtc_probe(struct platform_device *pdev)
};
struct device *dev = &pdev->dev;
struct meson_rtc *rtc;
- struct resource *res;
void __iomem *base;
int ret;
u32 tm;
@@ -312,8 +311,7 @@ static int meson_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &meson_rtc_ops;
rtc->rtc->range_max = U32_MAX;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 1c2d3c4a4963..80e364baac53 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -88,28 +88,16 @@ static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
__raw_writel(val, &priv->regs[reg]);
}
-static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
- unsigned int reg)
-{
- msm6242_write(priv, msm6242_read(priv, reg) | val, reg);
-}
-
-static inline void msm6242_clear(struct msm6242_priv *priv, unsigned int val,
- unsigned int reg)
-{
- msm6242_write(priv, msm6242_read(priv, reg) & ~val, reg);
-}
-
static void msm6242_lock(struct msm6242_priv *priv)
{
int cnt = 5;
- msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
while ((msm6242_read(priv, MSM6242_CD) & MSM6242_CD_BUSY) && cnt) {
- msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
udelay(70);
- msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
cnt--;
}
@@ -120,7 +108,7 @@ static void msm6242_lock(struct msm6242_priv *priv)
static void msm6242_unlock(struct msm6242_priv *priv)
{
- msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
}
static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
@@ -133,7 +121,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
msm6242_read(priv, MSM6242_SECOND1);
tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
msm6242_read(priv, MSM6242_MINUTE1);
- tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 +
+ tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
+ MSM6242_HOUR10_HR_MASK) * 10 +
msm6242_read(priv, MSM6242_HOUR1);
tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
msm6242_read(priv, MSM6242_DAY1);
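The hour fix above deserves a spelled-out before/after, because the original bug masked the register index rather than the value read back:

/* Before (buggy): the "& 3" binds to the register number, so the read
 * hits the wrong register:
 *	msm6242_read(priv, MSM6242_HOUR10 & 3)
 * After: read MSM6242_HOUR10 first, then mask the returned value:
 *	msm6242_read(priv, MSM6242_HOUR10) & MSM6242_HOUR10_HR_MASK
 */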
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
index 16bd26b5aa6f..f1e356394814 100644
--- a/drivers/rtc/rtc-mt7622.c
+++ b/drivers/rtc/rtc-mt7622.c
@@ -303,7 +303,6 @@ MODULE_DEVICE_TABLE(of, mtk_rtc_match);
static int mtk_rtc_probe(struct platform_device *pdev)
{
struct mtk_rtc *hw;
- struct resource *res;
int ret;
hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
@@ -312,8 +311,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->base = devm_ioremap_resource(&pdev->dev, res);
+ hw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->base))
return PTR_ERR(hw->base);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index ab9db57a6834..d5f190e578e4 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -212,7 +212,6 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
static int __init mv_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_plat_data *pdata;
u32 rtc_time;
int ret = 0;
@@ -221,8 +220,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index a2941c875a06..988a4dfcfaf8 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -727,7 +727,6 @@ static struct nvmem_config omap_rtc_nvmem_config = {
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
- struct resource *res;
u8 reg, mask, new_ctrl;
const struct platform_device_id *id_entry;
const struct of_device_id *of_id;
@@ -764,8 +763,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
if (!IS_ERR(rtc->clk))
clk_prepare_enable(rtc->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
clk_disable_unprepare(rtc->clk);
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 02b069caffd5..ba5baaca47be 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -417,6 +417,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
+ u32 wdd_timeout;
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -459,7 +460,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Watchdog timer enabled and reset pin /RST activated when timed out.
* Select 1Hz clock source for watchdog timer.
- * Timer is not started until WD_VAL is loaded with a valid value.
* Note: Countdown timer disabled and not available.
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_WD_CTL,
@@ -475,6 +475,14 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
+ /* Test whether the watchdog timer was started by the bootloader */
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+
#ifdef CONFIG_WATCHDOG
ret = devm_watchdog_register_device(dev, &pcf2127->wdd);
if (ret)
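Reading WD_VAL back detects a watchdog the bootloader has already armed; setting WDOG_HW_RUNNING tells the watchdog core to ping the hardware itself until userspace opens the device, so the board is not reset mid-boot. Sketch of the handoff (wdd is the driver's struct watchdog_device; mirrors the hunk above):

#include <linux/watchdog.h>

/* A nonzero countdown means the timer is already running; the core's
 * worker then feeds it until /dev/watchdogN is opened. */
if (wdd_timeout)
	set_bit(WDOG_HW_RUNNING, &wdd->status);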
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 2f435e533b10..b24c908f5f06 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,10 +35,6 @@
#define REG_OFFSET 0x0e
#define REG_OFFSET_MODE BIT(7)
-struct pcf8523 {
- struct rtc_device *rtc;
-};
-
static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
{
struct i2c_msg msgs[2];
@@ -345,16 +341,12 @@ static const struct rtc_class_ops pcf8523_rtc_ops = {
static int pcf8523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pcf8523 *pcf;
+ struct rtc_device *rtc;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
- if (!pcf)
- return -ENOMEM;
-
err = pcf8523_load_capacitance(client);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
@@ -364,12 +356,10 @@ static int pcf8523_probe(struct i2c_client *client,
if (err < 0)
return err;
- pcf->rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
+ rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
&pcf8523_rtc_ops, THIS_MODULE);
- if (IS_ERR(pcf->rtc))
- return PTR_ERR(pcf->rtc);
-
- i2c_set_clientdata(client, pcf);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 24baa4767b11..3c322f3079b0 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -390,7 +390,7 @@ static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
#define clkout_hw_to_pcf8563(_hw) container_of(_hw, struct pcf8563, clkout_hw)
-static int clkout_rates[] = {
+static const int clkout_rates[] = {
32768,
1024,
32,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 17653ed52ebb..2b6946744654 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -298,7 +298,6 @@ static int pic32_rtc_remove(struct platform_device *pdev)
static int pic32_rtc_probe(struct platform_device *pdev)
{
struct pic32_rtc_dev *pdata;
- struct resource *res;
int ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
@@ -311,8 +310,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
if (pdata->alarm_irq < 0)
return pdata->alarm_irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base))
return PTR_ERR(pdata->reg_base);
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index f5a30e0f16c2..07ea1be3abb9 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -49,7 +49,7 @@ struct pm8xxx_rtc_regs {
* @regmap: regmap used to access RTC registers
* @allow_set_time: indicates whether writing to the RTC is allowed
* @rtc_alarm_irq: rtc alarm irq number.
- * @ctrl_reg: rtc control register.
+ * @regs: rtc registers description.
* @rtc_dev: device structure.
* @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
*/
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index 2498278853af..aaf1b95e3990 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -354,21 +354,16 @@ static void rtc7301_init(struct rtc7301_priv *priv)
static int __init rtc7301_rtc_probe(struct platform_device *dev)
{
- struct resource *res;
void __iomem *regs;
struct rtc7301_priv *priv;
struct rtc_device *rtc;
int ret;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- regs = devm_ioremap_resource(&dev->dev, res);
+ regs = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/rtc/rtc-rtd119x.c b/drivers/rtc/rtc-rtd119x.c
index b233559d950b..bb98f2d574a5 100644
--- a/drivers/rtc/rtc-rtd119x.c
+++ b/drivers/rtc/rtc-rtd119x.c
@@ -167,7 +167,6 @@ static const struct of_device_id rtd119x_rtc_dt_ids[] = {
static int rtd119x_rtc_probe(struct platform_device *pdev)
{
struct rtd119x_rtc *data;
- struct resource *res;
u32 val;
int ret;
@@ -178,8 +177,7 @@ static int rtd119x_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->base_year = 2014;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 2b316661a578..6b7b3a69601a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/clk-provider.h>
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
@@ -52,6 +53,11 @@
#define RV3028_STATUS_CLKF BIT(6)
#define RV3028_STATUS_EEBUSY BIT(7)
+#define RV3028_CLKOUT_FD_MASK GENMASK(2, 0)
+#define RV3028_CLKOUT_PORIE BIT(3)
+#define RV3028_CLKOUT_CLKSY BIT(6)
+#define RV3028_CLKOUT_CLKOE BIT(7)
+
#define RV3028_CTRL1_EERD BIT(3)
#define RV3028_CTRL1_WADA BIT(5)
@@ -84,6 +90,9 @@ struct rv3028_data {
struct regmap *regmap;
struct rtc_device *rtc;
enum rv3028_type type;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
};
static u16 rv3028_trickle_resistors[] = {1000, 3000, 6000, 11000};
@@ -581,6 +590,140 @@ restore_eerd:
return ret;
}
+#ifdef CONFIG_COMMON_CLK
+#define clkout_hw_to_rv3028(hw) container_of(hw, struct rv3028_data, clkout_hw)
+
+static const int clkout_rates[] = {
+ 32768,
+ 8192,
+ 1024,
+ 64,
+ 32,
+ 1,
+};
+
+static unsigned long rv3028_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int clkout, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &clkout);
+ if (ret < 0)
+ return 0;
+
+	clkout &= RV3028_CLKOUT_FD_MASK;
+	if (clkout >= ARRAY_SIZE(clkout_rates))
+		return 0; /* reserved FD values: no documented rate */
+
+	return clkout_rates[clkout];
+}
+
+static long rv3028_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int rv3028_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) {
+ if (clkout_rates[i] == rate) {
+ ret = regmap_update_bits(rv3028->regmap,
+ RV3028_CLKOUT,
+ RV3028_CLKOUT_FD_MASK, i);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(rv3028->regmap, RV3028_CLKOUT,
+ RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int rv3028_clkout_prepare(struct clk_hw *hw)
+{
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ return regmap_write(rv3028->regmap, RV3028_CLKOUT,
+ RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
+}
+
+static void rv3028_clkout_unprepare(struct clk_hw *hw)
+{
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
+ regmap_update_bits(rv3028->regmap, RV3028_STATUS,
+ RV3028_STATUS_CLKF, 0);
+}
+
+static int rv3028_clkout_is_prepared(struct clk_hw *hw)
+{
+ int clkout, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &clkout);
+ if (ret < 0)
+ return ret;
+
+ return !!(clkout & RV3028_CLKOUT_CLKOE);
+}
+
+static const struct clk_ops rv3028_clkout_ops = {
+ .prepare = rv3028_clkout_prepare,
+ .unprepare = rv3028_clkout_unprepare,
+ .is_prepared = rv3028_clkout_is_prepared,
+ .recalc_rate = rv3028_clkout_recalc_rate,
+ .round_rate = rv3028_clkout_round_rate,
+ .set_rate = rv3028_clkout_set_rate,
+};
+
+static int rv3028_clkout_register_clk(struct rv3028_data *rv3028,
+ struct i2c_client *client)
+{
+ int ret;
+ struct clk *clk;
+ struct clk_init_data init;
+ struct device_node *node = client->dev.of_node;
+
+ ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS,
+ RV3028_STATUS_CLKF, 0);
+ if (ret < 0)
+ return ret;
+
+ init.name = "rv3028-clkout";
+ init.ops = &rv3028_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ rv3028->clkout_hw.init = &init;
+
+	/* optional override of the clock name */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &rv3028->clkout_hw);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return 0;
+}
+#endif
+
static struct rtc_class_ops rv3028_rtc_ops = {
.read_time = rv3028_get_time,
.set_time = rv3028_set_time,
@@ -708,6 +851,9 @@ static int rv3028_probe(struct i2c_client *client)
rv3028->rtc->max_user_freq = 1;
+#ifdef CONFIG_COMMON_CLK
+ rv3028_clkout_register_clk(rv3028, client);
+#endif
return 0;
}
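[Editor's note] Because clkout_rates[] above is sorted in descending order, rv3028_clkout_round_rate() resolves a request to the highest supported frequency that does not exceed it, and rv3028_clkout_set_rate() accepts only exact matches. An illustration of the resulting behaviour, with values derived from the table above (clk framework plumbing elided):

/*
 * Assuming clk is the registered "rv3028-clkout":
 *   clk_round_rate(clk, 50000) -> 32768  (capped at the maximum)
 *   clk_round_rate(clk, 8192)  -> 8192   (exact match)
 *   clk_round_rate(clk, 500)   -> 64     (next lower supported rate)
 *   clk_round_rate(clk, 0)     -> 0      (nothing at or below the request)
 */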
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 71e20a6bd387..3a9eb7043f01 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Epson RTC module RX-6110 SA
*
* Copyright(C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright(C) SEIKO EPSON CORPORATION 2013. All rights reserved.
- *
- * This driver software is distributed as is, without any warranty of any kind,
- * either express or implied as further specified in the GNU Public License.
- * This software may be used and distributed according to the terms of the GNU
- * Public License, version 2 as published by the Free Software Foundation.
- * See the file COPYING in the main directory of this archive for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bcd.h>
@@ -370,11 +362,6 @@ static int rx6110_probe(struct spi_device *spi)
return 0;
}
-static int rx6110_remove(struct spi_device *spi)
-{
- return 0;
-}
-
static const struct spi_device_id rx6110_id[] = {
{ "rx6110", 0 },
{ }
@@ -393,7 +380,6 @@ static struct spi_driver rx6110_driver = {
.of_match_table = of_match_ptr(rx6110_spi_of_match),
},
.probe = rx6110_probe,
- .remove = rx6110_remove,
.id_table = rx6110_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index da34cfd70f95..03672a246356 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -423,8 +423,6 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static struct i2c_driver s35390a_driver;
-
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -456,6 +454,10 @@ static int s35390a_probe(struct i2c_client *client,
}
}
+ s35390a->rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(s35390a->rtc))
+ return PTR_ERR(s35390a->rtc);
+
err_read = s35390a_read_status(s35390a, &status1);
if (err_read < 0) {
dev_err(dev, "error resetting chip\n");
@@ -485,11 +487,9 @@ static int s35390a_probe(struct i2c_client *client,
device_set_wakeup_capable(dev, 1);
- s35390a->rtc = devm_rtc_device_register(dev, s35390a_driver.driver.name,
- &s35390a_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(s35390a->rtc))
- return PTR_ERR(s35390a->rtc);
+ s35390a->rtc->ops = &s35390a_rtc_ops;
+ s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099;
/* supports per-minute alarms only, therefore set uie_unsupported */
s35390a->rtc->uie_unsupported = 1;
@@ -497,7 +497,7 @@ static int s35390a_probe(struct i2c_client *client,
if (status1 & S35390A_FLAG_INT2)
rtc_update_irq(s35390a->rtc, 1, RTC_AF);
- return 0;
+ return rtc_register_device(s35390a->rtc);
}
static struct i2c_driver s35390a_driver = {
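[Editor's note] The s35390a change above is an instance of the allocate-then-register idiom several drivers in this section adopt: devm_rtc_allocate_device() hands out an unregistered device so ops and range limits can be filled in before rtc_register_device() makes it visible. A minimal sketch under assumed names (foo_setup_rtc and its ops argument are hypothetical):

#include <linux/rtc.h>

static int foo_setup_rtc(struct device *dev, const struct rtc_class_ops *ops)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* configure before registration; the core reads these at register time */
	rtc->ops = ops;
	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	rtc->range_max = RTC_TIMESTAMP_END_2099;

	return rtc_register_device(rtc);
}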
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7801249c254b..e1b50e682fc4 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -444,7 +444,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
{
struct s3c_rtc *info = NULL;
struct rtc_time rtc_tm;
- struct resource *res;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -475,8 +474,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
info->irq_tick, info->irq_alarm);
/* get the memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->base = devm_ioremap_resource(&pdev->dev, res);
+ info->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 86fa723b3b76..d37893f6eaee 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -252,7 +252,6 @@ EXPORT_SYMBOL_GPL(sa1100_rtc_init);
static int sa1100_rtc_probe(struct platform_device *pdev)
{
struct sa1100_rtc *info;
- struct resource *iores;
void __iomem *base;
int irq_1hz, irq_alarm;
int ret;
@@ -281,8 +280,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
return ret;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, iores);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index b95676899750..36810dd40cd3 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -661,12 +661,6 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sprd_rtc_remove(struct platform_device *pdev)
-{
- device_init_wakeup(&pdev->dev, 0);
- return 0;
-}
-
static const struct of_device_id sprd_rtc_of_match[] = {
{ .compatible = "sprd,sc2731-rtc", },
{ },
@@ -679,7 +673,6 @@ static struct platform_driver sprd_rtc_driver = {
.of_match_table = sprd_rtc_of_match,
},
.probe = sprd_rtc_probe,
- .remove = sprd_rtc_remove,
};
module_platform_driver(sprd_rtc_driver);
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index c759c55359a1..a2c9c55667cd 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -365,13 +365,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sirfsoc_rtc_remove(struct platform_device *pdev)
-{
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int sirfsoc_rtc_suspend(struct device *dev)
{
@@ -450,7 +443,6 @@ static struct platform_driver sirfsoc_rtc_driver = {
.of_match_table = sirfsoc_rtc_of_match,
},
.probe = sirfsoc_rtc_probe,
- .remove = sirfsoc_rtc_remove,
};
module_platform_driver(sirfsoc_rtc_driver);
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 9f23b24f466c..833daeb7b60e 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -347,7 +347,6 @@ static const struct rtc_class_ops spear_rtc_ops = {
static int spear_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct spear_rtc_config *config;
int status = 0;
int irq;
@@ -369,8 +368,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
return status;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ config->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config->ioaddr))
return PTR_ERR(config->ioaddr);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 49474a31c66d..51041dc08af4 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -41,7 +41,6 @@
struct st_rtc {
struct rtc_device *rtc_dev;
struct rtc_wkalrm alarm;
- struct resource *res;
struct clk *clk;
unsigned long clkrate;
void __iomem *ioaddr;
@@ -186,7 +185,6 @@ static int st_rtc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct st_rtc *rtc;
- struct resource *res;
uint32_t mode;
int ret = 0;
@@ -210,8 +208,7 @@ static int st_rtc_probe(struct platform_device *pdev)
spin_lock_init(&rtc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ rtc->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->ioaddr))
return PTR_ERR(rtc->ioaddr);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index a833ebc4ecb9..01a45044f468 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -256,7 +256,6 @@ static int stk17ta8_nvram_write(void *priv, unsigned int pos, void *val,
static int stk17ta8_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int cal;
unsigned int flags;
struct rtc_plat_data *pdata;
@@ -275,8 +274,7 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 2999e33a7e37..781cabb2afca 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -693,15 +693,13 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
const struct stm32_rtc_registers *regs;
- struct resource *res;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 5e2bd9f1d01e..8dcd20b34dde 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data {
struct sun6i_rtc_dev {
struct rtc_device *rtc;
- struct device *dev;
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
@@ -669,7 +668,6 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
return -ENODEV;
platform_set_drvdata(pdev, chip);
- chip->dev = &pdev->dev;
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index 9b6f2483c1c6..f5d7f44550ce 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -422,7 +422,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rtc_dt_ids);
static int sunxi_rtc_probe(struct platform_device *pdev)
{
struct sunxi_rtc_dev *chip;
- struct resource *res;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
@@ -436,8 +435,7 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 69d695bf9500..7fbb1741692f 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -103,7 +103,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct tegra_rtc_info *info = dev_get_drvdata(dev);
unsigned long flags;
- u32 sec, msec;
+ u32 sec;
/*
* RTC hardware copies seconds to shadow seconds when a read of
@@ -111,7 +111,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
spin_lock_irqsave(&info->lock, flags);
- msec = readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS);
+ readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS);
sec = readl(info->base + TEGRA_RTC_REG_SHADOW_SECONDS);
spin_unlock_irqrestore(&info->lock, flags);
@@ -277,15 +277,13 @@ MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match);
static int tegra_rtc_probe(struct platform_device *pdev)
{
struct tegra_rtc_info *info;
- struct resource *res;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->base = devm_ioremap_resource(&pdev->dev, res);
+ info->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2c0467a9e717..e3840386f430 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -361,6 +361,13 @@ static const struct rtc_class_ops tps65910_rtc_ops = {
.set_offset = tps65910_set_offset,
};
+static const struct rtc_class_ops tps65910_rtc_ops_noirq = {
+ .read_time = tps65910_rtc_read_time,
+ .set_time = tps65910_rtc_set_time,
+ .read_offset = tps65910_read_offset,
+ .set_offset = tps65910_set_offset,
+};
+
static int tps65910_rtc_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = NULL;
@@ -414,14 +421,16 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
dev_name(&pdev->dev), &pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "IRQ is not free.\n");
- return ret;
- }
+ if (ret < 0)
+ irq = -1;
+
tps_rtc->irq = irq;
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (irq != -1) {
+ device_set_wakeup_capable(&pdev->dev, 1);
+ tps_rtc->rtc->ops = &tps65910_rtc_ops;
+	} else {
+		tps_rtc->rtc->ops = &tps65910_rtc_ops_noirq;
+	}
- tps_rtc->rtc->ops = &tps65910_rtc_ops;
tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
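[Editor's note] The net effect of this hunk: a failed IRQ request no longer aborts probe; the driver degrades to a time/offset-only RTC via the *_noirq ops table. No extra guards are needed in the driver because the rtc core already polices missing callbacks:

/*
 * With tps65910_rtc_ops_noirq installed, rtc_read_alarm() and
 * rtc_set_alarm() in drivers/rtc/interface.c see NULL ->read_alarm /
 * ->set_alarm hooks and return -EINVAL to userspace themselves.
 */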
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 5a29915a06ec..715b82981279 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -236,7 +236,6 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct tx4939rtc_plat_data *pdata;
- struct resource *res;
int irq, ret;
struct nvmem_config nvmem_cfg = {
.name = "tx4939_nvram",
@@ -253,8 +252,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+ pdata->rtcreg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->rtcreg))
return PTR_ERR(pdata->rtcreg);
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 63ffba21397b..d2da92187d56 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -284,7 +284,6 @@ static int rtc_probe(struct platform_device *pdev)
struct v3020 *chip;
int retval = -EBUSY;
int i;
- int temp;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -302,7 +301,7 @@ static int rtc_probe(struct platform_device *pdev)
/* Make sure the v3020 expects a communication cycle
* by reading 8 times */
for (i = 0; i < 8; i++)
- temp = chip->ops->read_bit(chip);
+ chip->ops->read_bit(chip);
/* Test chip by doing a write/read sequence
* to the chip ram */
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index d5d14cf86e0d..e2588625025f 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -122,12 +122,6 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
- if (tm->tm_year < 100) {
- dev_warn(dev, "Only years 2000-2199 are supported by the "
- "hardware!\n");
- return -EINVAL;
- }
-
writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
| (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
| (bin2bcd(tm->tm_mday))
@@ -200,7 +194,6 @@ static const struct rtc_class_ops vt8500_rtc_ops = {
static int vt8500_rtc_probe(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc;
- struct resource *res;
int ret;
vt8500_rtc = devm_kzalloc(&pdev->dev,
@@ -215,8 +208,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
if (vt8500_rtc->irq_alarm < 0)
return vt8500_rtc->irq_alarm;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vt8500_rtc->regbase = devm_ioremap_resource(&pdev->dev, res);
+ vt8500_rtc->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vt8500_rtc->regbase))
return PTR_ERR(vt8500_rtc->regbase);
@@ -224,27 +216,23 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
writel(VT8500_RTC_CR_ENABLE,
vt8500_rtc->regbase + VT8500_RTC_CR);
- vt8500_rtc->rtc = devm_rtc_device_register(&pdev->dev, "vt8500-rtc",
- &vt8500_rtc_ops, THIS_MODULE);
- if (IS_ERR(vt8500_rtc->rtc)) {
- ret = PTR_ERR(vt8500_rtc->rtc);
- dev_err(&pdev->dev,
- "Failed to register RTC device -> %d\n", ret);
- goto err_return;
- }
+ vt8500_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(vt8500_rtc->rtc))
+ return PTR_ERR(vt8500_rtc->rtc);
+
+ vt8500_rtc->rtc->ops = &vt8500_rtc_ops;
+ vt8500_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ vt8500_rtc->rtc->range_max = RTC_TIMESTAMP_END_2199;
ret = devm_request_irq(&pdev->dev, vt8500_rtc->irq_alarm,
vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
- goto err_return;
+ return ret;
}
- return 0;
-
-err_return:
- return ret;
+ return rtc_register_device(vt8500_rtc->rtc);
}
static int vt8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index 8ad4c4e6d557..ff46066a68a4 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -110,10 +110,12 @@ static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
tm->tm_mday = rtc.day;
tm->tm_mon = rtc.month - 1;
tm->tm_year = rtc.year + (rtc.century * 100) - 1900;
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+	/* Ignore other tm fields; rtc(4) says userspace shouldn't use them. */
- /* Don't compute day of week, we don't need it. */
- tm->tm_wday = -1;
+ if (rtc_valid_tm(tm)) {
+ dev_err(dev, "Time from RTC is invalid: %ptRr\n", tm);
+ return -EIO;
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 9683fbf7c78d..96db441f92b3 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -34,7 +34,6 @@
struct xgene_rtc_dev {
struct rtc_device *rtc;
- struct device *dev;
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
@@ -137,7 +136,6 @@ static irqreturn_t xgene_rtc_interrupt(int irq, void *id)
static int xgene_rtc_probe(struct platform_device *pdev)
{
struct xgene_rtc_dev *pdata;
- struct resource *res;
int ret;
int irq;
@@ -145,10 +143,8 @@ static int xgene_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- pdata->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->csr_base))
return PTR_ERR(pdata->csr_base);
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 2c762757fb54..539690568298 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -44,7 +44,7 @@ struct xlnx_rtc_dev {
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- int calibval;
+ unsigned int calibval;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -195,7 +195,6 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
static int xlnx_rtc_probe(struct platform_device *pdev)
{
struct xlnx_rtc_dev *xrtcdev;
- struct resource *res;
int ret;
xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
@@ -211,9 +210,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
xrtcdev->rtc->ops = &xlnx_rtc_ops;
xrtcdev->rtc->range_max = U32_MAX;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- xrtcdev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ xrtcdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xrtcdev->reg_base))
return PTR_ERR(xrtcdev->reg_base);
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index be3531e7f868..b7ca7d79fb28 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -103,8 +103,11 @@ static DEVICE_ATTR_RW(max_user_freq);
/**
* rtc_sysfs_show_hctosys - indicate if the given RTC set the system time
+ * @dev: The device that the attribute belongs to.
+ * @attr: The attribute being read.
+ * @buf: The result buffer.
*
- * Returns 1 if the system clock was set by this RTC at the last
+ * buf is "1" if the system clock was set by this RTC at the last
* boot or resume event.
*/
static ssize_t
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index f34ee41cbed8..4f4dd9d727c9 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -61,6 +61,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9dda431ec8f3..352056eb0dd1 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -5,6 +5,6 @@
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
- zfcp_unit.o
+ zfcp_unit.o zfcp_diag.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e390f8c6d5f3..09ec846fe01d 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
/*
@@ -25,6 +25,7 @@
* Martin Petermann
* Sven Schuetz
* Steffen Maier
+ * Benjamin Block
*/
#define KMSG_COMPONENT "zfcp"
@@ -36,6 +37,7 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -356,6 +358,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->erp_action.adapter = adapter;
+ if (zfcp_diag_adapter_setup(adapter))
+ goto failed;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -402,6 +407,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ if (zfcp_diag_sysfs_setup(adapter))
+ goto failed;
+
/* report size limit per scatter-gather segment */
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
@@ -426,6 +434,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
+ zfcp_diag_sysfs_destroy(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
@@ -449,6 +458,7 @@ void zfcp_adapter_release(struct kref *ref)
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
+ zfcp_diag_adapter_free(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index dccdb41bed8c..1234294700c4 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
- rec->pl_len = q_head->log_length;
- zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
- rec->pl_len, "fsf_res", req->req_id);
- }
+ rec->pl_len = q_head->log_length;
+ zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+ rec->pl_len, "fsf_res", req->req_id);
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 87d2f47a6990..8cc0eefe4ccc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_DEF_H
@@ -86,6 +86,7 @@
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
+#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE 0x00020000
/************************* STRUCTURE DEFINITIONS *****************************/
@@ -197,6 +198,7 @@ struct zfcp_adapter {
struct device_dma_parameters dma_parms;
struct zfcp_fc_events events;
unsigned long next_port_scan;
+ struct zfcp_diag_adapter *diagnostics;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c
new file mode 100644
index 000000000000..67a8f4e57db1
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Functions to handle diagnostics.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/kernfs.h>
+#include <linux/sysfs.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "zfcp_diag.h"
+#include "zfcp_ext.h"
+#include "zfcp_def.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
+
+/**
+ * zfcp_diag_adapter_setup() - Setup storage for adapter diagnostics.
+ * @adapter: the adapter to setup diagnostics for.
+ *
+ * Creates the data-structures to store the diagnostics for an adapter. This
+ * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
+ *
+ * Return:
+ * * 0 - Everything is OK
+ * * -ENOMEM - Could not allocate all/parts of the data-structures;
+ * &zfcp_adapter->diagnostics remains unchanged
+ */
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter *diag;
+ struct zfcp_diag_header *hdr;
+
+ diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+ if (diag == NULL)
+ return -ENOMEM;
+
+ diag->max_age = (5 * 1000); /* default value: 5 s */
+
+ /* setup header for port_data */
+ hdr = &diag->port_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->port_data.data;
+ hdr->buffer_size = sizeof(diag->port_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ /* setup header for config_data */
+ hdr = &diag->config_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->config_data.data;
+ hdr->buffer_size = sizeof(diag->config_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ adapter->diagnostics = diag;
+ return 0;
+}
+
+/**
+ * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
+ * @adapter: the adapter whose diagnostic structures should be freed.
+ *
+ * Frees all data-structures in the given adapter that store diagnostics
+ * information. Can safely be called with partially set up diagnostics.
+ */
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
+{
+ kfree(adapter->diagnostics);
+ adapter->diagnostics = NULL;
+}
+
+/**
+ * zfcp_diag_sysfs_setup() - Setup the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter to which the group should be added.
+ *
+ * Return: 0 on success; something else otherwise (see sysfs_create_group()).
+ */
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
+{
+ int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+ if (rc == 0)
+ adapter->diagnostics->sysfs_established = 1;
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter from which the group should be removed.
+ */
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
+{
+ if (adapter->diagnostics == NULL ||
+ !adapter->diagnostics->sysfs_established)
+ return;
+
+ /*
+ * We need this state-handling so we can prevent warnings being printed
+ * on the kernel-console in case we have to abort a halfway done
+ * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
+ * established. sysfs_remove_group() does this checking as well, but
+ * still prints a warning in case we try to remove a group that has not
+ * been established before.
+ */
+ adapter->diagnostics->sysfs_established = 0;
+ sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+}
+
+
+/**
+ * zfcp_diag_update_xdata() - Update a diagnostics buffer.
+ * @hdr: the meta data to update.
+ * @data: data to use for the update.
+ * @incomplete: flag stating whether the data in @data is incomplete.
+ */
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete)
+{
+ const unsigned long capture_timestamp = jiffies;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ /* make sure we never go into the past with an update */
+ if (!time_after_eq(capture_timestamp, hdr->timestamp))
+ goto out;
+
+ hdr->timestamp = capture_timestamp;
+ hdr->incomplete = incomplete;
+ memcpy(hdr->buffer, data, hdr->buffer_size);
+out:
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+}
+
+/**
+ * zfcp_diag_update_port_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Port Data.
+ * @adapter: Adapter to collect Port Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new diagnostics and updated the buffer;
+ * this also includes cases where data was retrieved, but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_port_data_sync() for possible error-codes (
+ * excluding -EAGAIN)
+ */
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_update_config_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Config Data.
+ * @adapter: Adapter to collect Config Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new diagnostics and updated the buffer;
+ * this also includes cases where data was retrieved, but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_config_data_sync() for possible error-codes (
+ * excluding -EAGAIN)
+ */
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
+
+ return rc;
+}
+
+static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update,
+ unsigned long *const flags)
+ __must_hold(hdr->access_lock)
+{
+ int rc;
+
+ if (hdr->updating == 1) {
+ rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
+ hdr->updating == 0,
+ hdr->access_lock);
+ rc = (rc == 0 ? -EAGAIN : -EINTR);
+ } else {
+ hdr->updating = 1;
+ spin_unlock_irqrestore(&hdr->access_lock, *flags);
+
+ /* unlocked, because update function sleeps */
+ rc = buffer_update(adapter);
+
+ spin_lock_irqsave(&hdr->access_lock, *flags);
+ hdr->updating = 0;
+
+ /*
+ * every thread waiting here went via an interruptible wait,
+ * so it's fine to only wake those
+ */
+ wake_up_interruptible_all(&__zfcp_diag_publish_wait);
+ }
+
+ return rc;
+}
+
+static bool
+__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
+ const struct zfcp_diag_header *const hdr)
+ __must_hold(hdr->access_lock)
+{
+ const unsigned long now = jiffies;
+
+ /*
+ * Should not happen (data is from the future). If it does, still
+ * signal that it needs refresh
+ */
+ if (!time_after_eq(now, hdr->timestamp))
+ return false;
+
+ if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
+ return false;
+
+ return true;
+}
+
+/**
+ * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
+ * diagnostics buffer, rate-limited.
+ * @adapter: Adapter to collect the diagnostics from.
+ * @hdr: buffer-header for which to update with the collected diagnostics.
+ * @buffer_update: Specific implementation for collecting and updating.
+ *
+ * This function will cause an update of the given @hdr by calling the also
+ * given @buffer_update function. If called by multiple sources at the same
+ * time, it will synchronize the update by only allowing one source to call
+ * @buffer_update and the others to wait for that source to complete instead
+ * (the wait is interruptible).
+ *
+ * Additionally, this version is rate-limited: it does nothing if the buffer
+ * is still fresh enough (within the limit), and otherwise only returns once
+ * the buffer has been refreshed, or once the source/thread that started this
+ * update is the one that made the update (this prevents endless loops).
+ *
+ * Return:
+ * * 0 - If the update was successfully published and/or the buffer is
+ * fresh enough
+ * * -EINTR - If the thread went into the wait-state and was interrupted
+ * * whatever @buffer_update returns
+ */
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ for (rc = 0;
+ !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
+ rc = 0) {
+ rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
+ &flags);
+ if (rc != -EAGAIN)
+ break;
+ }
+
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+
+ return rc;
+}
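[Editor's note] A sketch of the intended caller pattern for the update machinery above; it mirrors the sysfs show-routines added later in this patch. foo_read_port_diag is a hypothetical name, and the usual zfcp includes (zfcp_def.h, zfcp_diag.h) are presumed:

static int foo_read_port_diag(struct zfcp_adapter *adapter)
{
	struct zfcp_diag_header *hdr =
		&adapter->diagnostics->port_data.header;
	unsigned long flags;
	int rc;

	/* refresh the cached data at most once per max_age ms */
	rc = zfcp_diag_update_buffer_limited(
		adapter, hdr, zfcp_diag_update_port_data_buffer);
	if (rc != 0)
		return rc;

	spin_lock_irqsave(&hdr->access_lock, flags);
	/* consume adapter->diagnostics->port_data.data under the lock;
	 * check hdr->incomplete before trusting every field */
	spin_unlock_irqrestore(&hdr->access_lock, flags);

	return 0;
}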
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
new file mode 100644
index 000000000000..b9c93d15f67c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Definitions for handling diagnostics in the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef ZFCP_DIAG_H
+#define ZFCP_DIAG_H
+
+#include <linux/spinlock.h>
+
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+/**
+ * struct zfcp_diag_header - general part of a diagnostic buffer.
+ * @access_lock: lock protecting all the data in this buffer.
+ * @updating: flag showing that an update for this buffer is currently running.
+ * @incomplete: flag showing that the data in @buffer is incomplete.
+ * @timestamp: time in jiffies when the data of this buffer was last captured.
+ * @buffer: implementation-dependent data of this buffer
+ * @buffer_size: size of @buffer
+ */
+struct zfcp_diag_header {
+ spinlock_t access_lock;
+
+ /* Flags */
+ u64 updating :1;
+ u64 incomplete :1;
+
+ unsigned long timestamp;
+
+ void *buffer;
+ size_t buffer_size;
+};
+
+/**
+ * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
+ * adapter.
+ * @sysfs_established: flag showing that the associated sysfs-group was created
+ * during the run of zfcp_adapter_enqueue().
+ * @max_age: maximum age of data in diagnostic buffers before they need to be
+ * refreshed (in ms).
+ * @port_data: data retrieved using exchange port data.
+ * @port_data.header: header with metadata for the cache in @port_data.data.
+ * @port_data.data: cached QTCB Bottom of command exchange port data.
+ * @config_data: data retrieved using exchange config data.
+ * @config_data.header: header with metadata for the cache in @config_data.data.
+ * @config_data.data: cached QTCB Bottom of command exchange config data.
+ */
+struct zfcp_diag_adapter {
+ u64 sysfs_established :1;
+
+ unsigned long max_age;
+
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_port data;
+ } port_data;
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_config data;
+ } config_data;
+};
+
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
+
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete);
+
+/*
+ * Function type used in zfcp_diag_update_buffer_limited() for the function
+ * that does the buffer-implementation-dependent work.
+ */
+typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update);
+
+/**
+ * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
+ * SFP Data.
+ * @adapter: adapter to test the availability of SFP Data reporting for.
+ */
+static inline bool
+zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter)
+{
+ return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA);
+}
+
+#endif /* ZFCP_DIAG_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 96f0d34e9459..93655b85b73f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -174,7 +174,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
- p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
@@ -190,7 +190,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
a_status = atomic_read(&adapter->status);
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
- a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (p_status & ZFCP_STATUS_COMMON_NOESC)
return need;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 31e8a7240fd7..c8556787cfdc 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group zfcp_sysfs_diag_attr_group;
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index cf63916814cc..223a805f0b0b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
@@ -19,6 +20,7 @@
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
@@ -554,6 +556,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_diag_header *const diag_hdr =
+ &adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
struct Scsi_Host *shost = adapter->scsi_host;
@@ -570,6 +574,12 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update till the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -585,6 +595,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
fc_host_node_name(shost) = 0;
fc_host_port_name(shost) = 0;
fc_host_port_id(shost) = 0;
@@ -653,16 +666,28 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
+ struct zfcp_diag_header *const diag_hdr =
+ &req->adapter->diagnostics->port_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
+ struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update till the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
zfcp_fsf_exchange_port_evaluate(req);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
@@ -1261,7 +1286,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
@@ -1278,6 +1304,19 @@ out:
return retval;
}
+
+/**
+ * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Config Data was successful, @data is complete
+ * * -EIO - Exchange Config Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
@@ -1301,7 +1340,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
if (data)
req->data = data;
@@ -1309,9 +1349,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
+
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
@@ -1369,10 +1416,17 @@ out:
}
/**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @qdio: pointer to struct zfcp_qdio
- * @data: pointer to struct fsf_qtcb_bottom_port
- * Returns: 0 on success, error otherwise
+ * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Port Data was successful, @data is complete
+ * * -EIO - Exchange Port Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ * * -EOPNOTSUPP - This operation is not supported
*/
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
@@ -1408,10 +1462,15 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
-
return retval;
out_unlock:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2c658b66318c..2b1e4da1944f 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -163,6 +163,8 @@
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200
+#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
@@ -407,7 +409,24 @@ struct fsf_qtcb_bottom_port {
u8 cp_util;
u8 cb_util;
u8 a_util;
- u8 res2[253];
+ u8 res2;
+ u16 temperature;
+ u16 vcc;
+ u16 tx_bias;
+ u16 tx_power;
+ u16 rx_power;
+ union {
+ u16 raw;
+ struct {
+ u16 fec_active :1;
+ u16:7;
+ u16 connector_type :2;
+ u16 sfp_invalid :1;
+ u16 optical_port :1;
+ u16 port_tx_type :4;
+ };
+ } sfp_flags;
+ u8 res3[240];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e9ded2befa0d..3910d529c15a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -605,7 +605,7 @@ zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret) {
+ if (ret != 0 && ret != -EAGAIN) {
kfree(data);
return NULL;
}
@@ -634,7 +634,7 @@ static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret)
+ if (ret != 0 && ret != -EAGAIN)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
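[Editor's note] Both zfcp_scsi.c hunks adopt the return convention introduced for the _sync functions in zfcp_fsf.c above, where -EAGAIN means the command completed but the returned data is incomplete. A condensed caller-side sketch (names as in the patch; the surrounding function is omitted):

	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
	if (ret != 0 && ret != -EAGAIN) {
		kfree(data);	/* hard failure (-EIO, -ENOMEM, ...): nothing usable */
		return NULL;
	}
	/* ret == 0: data complete; ret == -EAGAIN: partial but usable */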
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index af197e2b3e69..494b9fe9cc94 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
+#include "zfcp_diag.h"
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -325,6 +326,50 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ /* ceil(log(2^64 - 1) / log(10)) = 20 */
+ rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ unsigned long max_age;
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ rc = kstrtoul(buf, 10, &max_age);
+ if (rc != 0)
+ goto out;
+
+ adapter->diagnostics->max_age = max_age;
+
+ rc = count;
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
+ zfcp_sysfs_adapter_diag_max_age_show,
+ zfcp_sysfs_adapter_diag_max_age_store);
+
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
@@ -337,6 +382,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
+ &dev_attr_adapter_diag_max_age.attr,
NULL
};
@@ -577,7 +623,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
@@ -603,7 +649,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
*stat_inf = qtcb_config->stat_info;
kfree(qtcb_config);
@@ -664,3 +710,123 @@ struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_queue_full,
NULL
};
+
+static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
+ struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ struct zfcp_diag_header *diag_hdr;
+ struct fc_els_flogi *nsp;
+ ssize_t rc = -ENOLINK;
+ unsigned long flags;
+ unsigned int status;
+
+ if (!adapter)
+ return -ENODEV;
+
+ status = atomic_read(&adapter->status);
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
+ goto out;
+
+ diag_hdr = &adapter->diagnostics->config_data.header;
+
+ rc = zfcp_diag_update_buffer_limited(
+ adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
+ if (rc != 0)
+ goto out;
+
+ spin_lock_irqsave(&diag_hdr->access_lock, flags);
+ /* nport_serv_param doesn't contain the ELS_Command code */
+ nsp = (struct fc_els_flogi *)((unsigned long)
+ adapter->diagnostics->config_data
+ .data.nport_serv_param -
+ sizeof(u32));
+
+ rc = scnprintf(buf, 5 + 2, "%hu\n",
+ be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
+
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
+ zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
+
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \
+ static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+ { \
+ struct zfcp_adapter *const adapter = \
+ zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); \
+ struct zfcp_diag_header *diag_hdr; \
+ ssize_t rc = -ENOLINK; \
+ unsigned long flags; \
+ unsigned int status; \
+ \
+ if (!adapter) \
+ return -ENODEV; \
+ \
+ status = atomic_read(&adapter->status); \
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || \
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || \
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) \
+ goto out; \
+ \
+ if (!zfcp_diag_support_sfp(adapter)) { \
+ rc = -EOPNOTSUPP; \
+ goto out; \
+ } \
+ \
+ diag_hdr = &adapter->diagnostics->port_data.header; \
+ \
+ rc = zfcp_diag_update_buffer_limited( \
+ adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
+ if (rc != 0) \
+ goto out; \
+ \
+ spin_lock_irqsave(&diag_hdr->access_lock, flags); \
+ rc = scnprintf( \
+ buf, (_prtsize) + 2, _prtfmt "\n", \
+ adapter->diagnostics->port_data.data._qtcb_member); \
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \
+ \
+ out: \
+ zfcp_ccw_adapter_put(adapter); \
+ return rc; \
+ } \
+ static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
+ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+ &dev_attr_adapter_diag_sfp_temperature.attr,
+ &dev_attr_adapter_diag_sfp_vcc.attr,
+ &dev_attr_adapter_diag_sfp_tx_bias.attr,
+ &dev_attr_adapter_diag_sfp_tx_power.attr,
+ &dev_attr_adapter_diag_sfp_rx_power.attr,
+ &dev_attr_adapter_diag_sfp_port_tx_type.attr,
+ &dev_attr_adapter_diag_sfp_optical_port.attr,
+ &dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+ &dev_attr_adapter_diag_sfp_connector_type.attr,
+ &dev_attr_adapter_diag_sfp_fec_active.attr,
+ &dev_attr_adapter_diag_b2b_credit.attr,
+ NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+ .name = "diagnostics",
+ .attrs = zfcp_sysfs_diag_attrs,
+};
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 536426f25e86..f2f7e6e76c07 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -172,6 +175,19 @@ static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
}
}
+static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
+{
+ int resid = cmd->SCp.this_residual;
+ struct scatterlist *s = cmd->SCp.buffer;
+
+ if (s)
+ while (!sg_is_last(s)) {
+ s = sg_next(s);
+ resid += s->length;
+ }
+ scsi_set_resid(cmd, resid);
+}
+
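(Editor's aside: a quick worked example of set_resid_from_SCp(). Suppose the command's scatterlist has three 512-byte entries and the transfer stopped 100 bytes into the first one; the numbers are hypothetical:)

	int resid = 412;	/* SCp.this_residual: 512 - 100 already done */
	resid += 512;		/* entry 1, untouched */
	resid += 512;		/* entry 2, untouched */
	scsi_set_resid(cmd, resid);	/* reports 1436 bytes not transferred */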
/**
* NCR5380_poll_politely2 - wait for two chip register values
* @hostdata: host private data
@@ -954,7 +970,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -1379,7 +1396,7 @@ static void do_reset(struct Scsi_Host *instance)
* MESSAGE OUT phase and sending an ABORT message.
* @instance: relevant scsi host instance
*
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative error code on failure.
*/
static int do_abort(struct Scsi_Host *instance)
@@ -1404,7 +1421,7 @@ static int do_abort(struct Scsi_Host *instance)
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
@@ -1415,7 +1432,7 @@ static int do_abort(struct Scsi_Host *instance)
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
}
@@ -1424,17 +1441,17 @@ static int do_abort(struct Scsi_Host *instance)
len = 1;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+ if (len)
+ rc = -ENXIO;
/*
* If we got here, and the command completed successfully,
* we're about to go into bus free state.
*/
- return len ? -1 : 0;
-
-timeout:
+out:
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- return -1;
+ return rc;
}
/*
@@ -1803,6 +1820,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result |= cmd->SCp.Status;
cmd->result |= cmd->SCp.Message << 8;
+ set_resid_from_SCp(cmd);
+
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
@@ -2264,7 +2283,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
hostdata->dma_len = 0;
- if (do_abort(instance)) {
+ if (do_abort(instance) < 0) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
result = FAILED;
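(Editor's aside: the new disconnect_mask parameter gates disconnect privileges per target ID; a target may disconnect only when its bit is set. A minimal sketch of the check with an illustrative mask value; may_disconnect is a hypothetical helper, not part of the patch:)

/* e.g. loading with disconnect_mask=0xfb clears bit 2, so target 2
 * is never granted disconnect privileges */
static bool may_disconnect(unsigned int mask, int target)
{
	return (mask & BIT(target)) != 0;	/* BIT(2) == 0x4; 0xfb & 0x4 == 0 */
}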
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 0ed3f806ace5..e36608ce937a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1477,6 +1477,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
struct aac_srb * srbcmd;
u32 flag;
u32 timeout;
+ struct aac_dev *dev = fib->dev;
aac_fib_init(fib);
switch(cmd->sc_data_direction){
@@ -1503,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->flags = cpu_to_le32(flag);
timeout = cmd->request->timeout/HZ;
if (timeout == 0)
- timeout = 1;
+ timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit = 0; /* Obsolete parameter */
srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
@@ -2467,13 +2468,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2559,13 +2560,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fa03230f6ba..e3e4ecbea726 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -85,7 +85,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BUILD 50983
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -108,6 +108,8 @@ enum {
#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define AAC_SA_TIMEOUT 180
+#define AAC_ARC_TIMEOUT 60
#define get_bus_number(x) (x/AAC_MAX_TARGETS)
#define get_target_number(x) (x%AAC_MAX_TARGETS)
@@ -1328,7 +1330,7 @@ struct fib {
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
+#define AAC_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
@@ -1601,6 +1603,7 @@ struct aac_dev
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
struct delayed_work safw_rescan_work;
+ struct delayed_work src_reinit_aif_worker;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1673,6 +1676,7 @@ struct aac_dev
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
+ u8 soft_reset_support;
};
#define aac_adapter_interrupt(dev) \
@@ -2644,7 +2648,12 @@ int aac_scan_host(struct aac_dev *dev);
static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
{
- schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY);
+}
+
+static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY);
}
static inline void aac_safw_rescan_worker(struct work_struct *work)
@@ -2658,10 +2667,10 @@ static inline void aac_safw_rescan_worker(struct work_struct *work)
aac_scan_host(dev);
}
-static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
{
- if (dev->sa_firmware)
- cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
/* SCp.phase values */
@@ -2671,6 +2680,7 @@ static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
#define AAC_OWNER_FIRMWARE 0x106
void aac_safw_rescan_worker(struct work_struct *work);
+void aac_src_reinit_aif_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
int aac_setup_safw_adapter(struct aac_dev *dev);
@@ -2728,6 +2738,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index);
static inline int aac_is_src(struct aac_dev *dev)
{
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d4fcfa1e54e0..f75878d773cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -571,6 +571,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
else
dev->sa_firmware = 0;
+ if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
+ dev->soft_reset_support = 1;
+ else
+ dev->soft_reset_support = 0;
+
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 2142a649e865..5a8a999606ea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -232,6 +232,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
fibptr->callback_data = NULL;
fibptr->callback = NULL;
+ fibptr->flags = 0;
return fibptr;
}
@@ -1463,6 +1464,14 @@ retry_next:
}
}
+static void aac_schedule_bus_scan(struct aac_dev *aac)
+{
+ if (aac->sa_firmware)
+ aac_schedule_safw_scan_worker(aac);
+ else
+ aac_schedule_src_reinit_aif_worker(aac);
+}
+
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
@@ -1638,7 +1647,7 @@ out:
*/
if (!retval && !is_kdump_kernel()) {
dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
- aac_schedule_safw_scan_worker(aac);
+ aac_schedule_bus_scan(aac);
}
if (jafo) {
@@ -1959,6 +1968,16 @@ int aac_scan_host(struct aac_dev *dev)
return rcode;
}
+void aac_src_reinit_aif_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, src_reinit_aif_worker);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+ aac_reinit_aif(dev, dev->cardtype);
+}
+
/**
 * aac_handle_sa_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4a858789e6c5..ee6bc2f9b80a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -391,6 +391,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
int chn, tid;
unsigned int depth = 0;
unsigned int set_timeout = 0;
+ int timeout = 0;
bool set_qd_dev_type = false;
u8 devtype = 0;
@@ -483,10 +484,13 @@ common_config:
/*
* Firmware has an individual device recovery time typically
- * of 35 seconds, give us a margin.
+	 * of 35 seconds, so give us a margin. Thor devices can take longer
+	 * in error recovery, hence the different value.
*/
- if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
- blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ if (set_timeout) {
+ timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+ }
if (depth > 256)
depth = 256;
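(Editor's aside: the same firmware-dependent timeout now drives both aac_scsi_common() and aac_slave_configure(). As a sketch, the selection could be factored into a helper like this; aac_default_timeout_secs is hypothetical, not part of the patch:)

static inline u32 aac_default_timeout_secs(const struct aac_dev *dev)
{
	/* Thor (sa_firmware) controllers need a longer recovery window */
	return dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;	/* 180 : 60 */
}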
@@ -608,9 +612,13 @@ static struct device_attribute *aac_dev_attrs[] = {
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
+ int retval;
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ retval = aac_adapter_check_health(dev);
+ if (retval)
+ return -EBUSY;
return aac_do_ioctl(dev, cmd, arg);
}
@@ -1585,6 +1593,19 @@ static void aac_init_char(void)
}
}
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
+{
+ /*
+	 * Firmware may send AIF messages very early, and the driver may have
+	 * ignored them because it was not yet ready to process messages. Send
+	 * an AIF to the firmware now so that any unprocessed events can be
+	 * handled.
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
+
+}
+
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
@@ -1682,6 +1703,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&aac->scan_mutex);
INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+ INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
+ aac_src_reinit_aif_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1872,7 +1895,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1931,7 +1954,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1989,7 +2012,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b66e06726c8..787ec9baebb0 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -733,10 +733,20 @@ static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
return ctrl_up;
}
+static void aac_src_drop_io(struct aac_dev *dev)
+{
+ if (!dev->soft_reset_support)
+ return;
+
+ aac_adapter_sync_cmd(dev, DROP_IO,
+ 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
NULL, NULL, NULL, NULL);
+ aac_src_drop_io(dev);
}
static void aac_send_iop_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 88053b15c363..db687ef8a99e 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1400,7 +1400,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
, pCCB->acb
, pCCB->startdone
, atomic_read(&acb->ccboutstandingcount));
- return;
+ return;
}
arcmsr_report_ccb_state(acb, pCCB, error);
}
@@ -3476,8 +3476,8 @@ polling_hbc_ccb_retry:
, pCCB->pcmd->device->id
, (u32)pCCB->pcmd->device->lun
, pCCB);
- pCCB->pcmd->result = DID_ABORT << 16;
- arcmsr_ccb_complete(pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
continue;
}
printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d12dd89538df..ddb52e7ba622 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1067,7 +1067,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
* Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
* Params : host - host to finish
* Notes : This is called when a command is:
- * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT
* : This must not return until all transfers are completed.
*/
static
@@ -1816,7 +1816,7 @@ int acornscsi_reconnect(AS_Host *host)
}
/*
- * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Function: int acornscsi_reconnect_finish(AS_Host *host)
* Purpose : finish reconnecting a command
* Params : host - host to complete
* Returns : 0 if failed
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index e809493d0d06..a82b63a66635 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e41f0bbdc9fd..c6a752309dda 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1680,7 +1680,7 @@ static struct scsi_host_template atp870u_template = {
.bios_param = atp870u_biosparam /* biosparm */,
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
- .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
.max_sectors = ATP870U_MAX_SECTORS,
};
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2f9213b257a4..eb0c76338295 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1487,8 +1487,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return ret;
}
-int
-restart_bfa(struct bfad_s *bfad)
+static int restart_bfa(struct bfad_s *bfad)
{
unsigned long flags;
struct pci_dev *pdev = bfad->pcidev;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 29ab81df75c0..fbfce02e5b93 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- if (rc != BFA_STATUS_OK)
+ if (rc != BFA_STATUS_OK) {
+ kfree(fcstats);
return NULL;
+ }
wait_for_completion(&fcomp.comp);
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index e4469df9c469..698f5ebaa0c2 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -813,7 +813,7 @@ struct fcoe_confqe {
/*
- * FCoE conection data base
+ * FCoE connection data base
*/
struct fcoe_conn_db {
#if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 401743e2b429..4c8122a82322 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1242,7 +1242,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
time_left = wait_for_completion_timeout(&io_req->abts_done,
- (2 * rp->r_a_tov + 1) * HZ);
+ msecs_to_jiffies(2 * rp->r_a_tov + 1));
if (time_left)
BNX2FC_IO_DBG(io_req,
"Timed out in eh_abort waiting for abts_done");
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c5fa5f3b00e9..0b28d44d3573 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
- pci_dev_put(hba->pcidev);
if (hba->regview) {
pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
+ pci_dev_put(hba->pcidev);
bnx2i_free_mp_bdt(hba);
bnx2i_release_free_cid_que(hba);
iscsi_host_free(shost);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e51923886475..950f9cdf0577 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -793,10 +793,10 @@ csio_hw_get_flash_params(struct csio_hw *hw)
goto found;
}
- /* Decode Flash part size. The code below looks repetative with
+ /* Decode Flash part size. The code below looks repetitive with
* common encodings, but that's not guaranteed in the JEDEC
- * specification for the Read JADEC ID command. The only thing that
- * we're guaranteed by the JADEC specification is where the
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
* Manufacturer ID is in the returned result. After that each
* Manufacturer ~could~ encode things completely differently.
* Note, all Flash parts must have 64KB sectors.
@@ -983,8 +983,8 @@ retry:
waiting -= 50;
/*
- * If neither Error nor Initialialized are indicated
- * by the firmware keep waiting till we exaust our
+ * If neither Error nor Initialized are indicated
+ * by the firmware keep waiting till we exhaust our
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
@@ -1738,7 +1738,7 @@ static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
* Convert Common Code Forward Error Control settings into the
* Firmware's API. If the current Requested FEC has "Automatic"
* (IEEE 802.3) specified, then we use whatever the Firmware
- * sent us as part of it's IEEE 802.3-based interpratation of
+	 * sent us as part of its IEEE 802.3-based interpretation of
* the Transceiver Module EPROM FEC parameters. Otherwise we
* use whatever is in the current Requested FEC settings.
*/
@@ -2834,7 +2834,7 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
}
/*
- * csio_hws_initializing - Initialiazing state
+ * csio_hws_initializing - Initializing state
* @hw - HW module
* @evt - Event
*
@@ -3049,7 +3049,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
if (!csio_is_hw_master(hw))
break;
/*
- * The BYE should have alerady been issued, so we cant
+ * The BYE should have already been issued, so we can't
* use the mailbox interface. Hence we use the PL_RST
* register directly.
*/
@@ -3104,7 +3104,7 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
* optionally emitting a warning or alert message. The table is terminated
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
@@ -4219,7 +4219,7 @@ csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
* @hw: Pointer to HW module.
*
* It is assumed that the initialization is a synchronous operation.
- * So when we return afer posting the event, the HW SM should be in
+ * So when we return after posting the event, the HW SM should be in
* the ready state, if there were no errors during init.
*/
int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index a6dd704d7f2d..2e8a3ac575cb 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -154,13 +154,10 @@ csio_dfs_create(struct csio_hw *hw)
/*
* csio_dfs_destroy - Destroys per-hw debugfs.
*/
-static int
+static void
csio_dfs_destroy(struct csio_hw *hw)
{
- if (hw->debugfs_root)
- debugfs_remove_recursive(hw->debugfs_root);
-
- return 0;
+ debugfs_remove_recursive(hw->debugfs_root);
}
/*
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 66e58f0a75dc..74ff8adc41f7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
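(Editor's aside: these FDMI callbacks can run in contexts where interrupts are already disabled, and spin_unlock_irq() would unconditionally re-enable them. The irqsave/irqrestore pairing preserves the caller's interrupt state, as in this minimal sketch:)

	unsigned long flags;

	spin_lock_irqsave(&hw->lock, flags);	/* safe even if IRQs are already off */
	/* ... submit the next FDMI request under the lock ... */
	spin_unlock_irqrestore(&hw->lock, flags);	/* restores, never forces IRQs on */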
/**
@@ -1989,7 +1992,7 @@ static int
csio_ln_init(struct csio_lnode *ln)
{
int rv = -EINVAL;
- struct csio_lnode *rln, *pln;
+ struct csio_lnode *pln;
struct csio_hw *hw = csio_lnode_to_hw(ln);
csio_init_state(&ln->sm, csio_lns_uninit);
@@ -2019,7 +2022,6 @@ csio_ln_init(struct csio_lnode *ln)
	 * The rest is common for non-root physical and NPIV lnodes.
* Just get references to all other modules
*/
- rln = csio_root_lnode(ln);
if (csio_is_npiv_ln(ln)) {
/* NPIV */
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 6f13673d6aa0..94810b19e747 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1210,7 +1210,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
!csio_is_hw_intr_enabled(hw)) {
csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
*((uint8_t *)mbp->mb));
- goto error_out;
+ goto error_out;
}
if (mbm->mcurrent != NULL) {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index da50e87921bc..bc1086ae6835 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -2073,7 +2073,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct net_device *ndev = cdev->ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int i, err;
if (!lldi->vr->iscsi.size) {
@@ -2082,7 +2081,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
}
cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3e17af8aedeb..0d044c165960 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2284,34 +2284,6 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
- cxgbi_sock_put(csk);
-
- return len;
-}
-
-static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- if (csk->csk_family == AF_INET)
- len = sprintf(buf, "%pI4",
- &csk->daddr.sin_addr.s_addr);
- else
- len = sprintf(buf, "%pI6",
- &csk->daddr6.sin6_addr);
-
- cxgbi_sock_put(csk);
-
- return len;
-}
-
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
char *buf)
{
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 2dbf35f82787..fbd2ae40dab4 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -44,14 +44,12 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb *ioarcb;
struct sisl_ioasa *ioasa;
u32 resid;
if (unlikely(!cmd))
return;
- ioarcb = &(cmd->rcb);
ioasa = &(cmd->sa);
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 7bd376d95ed5..b02ac389e6c6 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
return false;
}
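(Editor's aside: this one-liner fixes a semaphore leak: the early-return path skipped the up() that the success path performs. The balanced pattern, sketched with a hypothetical read_ok flag:)

	down(&a->nvram_semaphore);
	if (!read_ok) {
		up(&a->nvram_semaphore);	/* previously leaked on this path */
		return false;
	}
	/* ... use the NVRAM contents ... */
	up(&a->nvram_semaphore);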
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 80608b53897b..8ef150dfb6f7 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1024,7 +1024,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
atomic64_inc(&fnic_stats->io_stats.io_completions);
- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+ io_duration_time = jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(start_time);
if(io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 78af9cc2009b..1f55b9e4e74a 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -259,7 +259,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
- int dev_cmd_err[] = {
+ static const int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 720c4d6be939..233c73e01246 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/timer.h>
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
@@ -84,6 +85,7 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
+#define CLEAR_ITCT_TIMEOUT 20
struct hisi_hba;
@@ -167,6 +169,7 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ atomic_t down_cnt;
};
struct hisi_sas_port {
@@ -296,8 +299,8 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*clear_itct)(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *dev);
+ int (*clear_itct)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *dev);
void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
@@ -321,6 +324,44 @@ struct hisi_sas_hw {
const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
+#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+
+struct hisi_sas_debugfs_cq {
+ struct hisi_sas_cq *cq;
+ void *complete_hdr;
+};
+
+struct hisi_sas_debugfs_dq {
+ struct hisi_sas_dq *dq;
+ struct hisi_sas_cmd_hdr *hdr;
+};
+
+struct hisi_sas_debugfs_regs {
+ struct hisi_hba *hisi_hba;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_port {
+ struct hisi_sas_phy *phy;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_iost {
+ struct hisi_sas_iost *iost;
+};
+
+struct hisi_sas_debugfs_itct {
+ struct hisi_sas_itct *itct;
+};
+
+struct hisi_sas_debugfs_iost_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
+struct hisi_sas_debugfs_itct_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;
@@ -402,19 +443,20 @@ struct hisi_hba {
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
- u32 *debugfs_regs[DEBUGFS_REGS_NUM];
- u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
- void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_iost *debugfs_iost;
- struct hisi_sas_itct *debugfs_itct;
- u64 *debugfs_iost_cache;
- u64 *debugfs_itct_cache;
-
+ struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM];
+ struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS];
+ struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+
+ u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP];
+ int debugfs_dump_index;
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
- bool debugfs_snapshot;
};
/* Generic HW DMA host memory structures */
@@ -556,6 +598,7 @@ struct hisi_sas_slot_dif_buf_table {
extern struct scsi_transport_template *hisi_sas_stt;
extern bool hisi_sas_debugfs_enable;
+extern u32 hisi_sas_debugfs_dump_count;
extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 0847e682797b..03588ec3c394 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (in_softirq())
+ /*
+		 * For IOs from the upper layer, preemption may already be
+		 * disabled in the IO path. down() may sleep, and sleeping
+		 * with preemption disabled makes schedule() report a bug,
+		 * so check preemptible() before calling down().
+ */
+ if (!preemptible())
return -EINVAL;
down(&hisi_hba->sem);
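(Editor's aside: the previous in_softirq() test missed callers that merely held preemption off. A minimal sketch of the new guard:)

	if (!preemptible())	/* false in softirq, IRQ and preempt-disabled contexts */
		return -EINVAL;	/* cannot take a sleeping semaphore here */
	down(&hisi_hba->sem);	/* otherwise sleeping is permitted */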
@@ -968,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_port *port;
unsigned long flags;
if (!sas_port)
return;
+ port = to_hisi_sas_port(sas_port);
spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
@@ -1045,6 +1052,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
+ int ret = 0;
dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
@@ -1056,13 +1064,16 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
device->lldd_dev = NULL;
}
if (hisi_hba->hw->free_device)
hisi_hba->hw->free_device(sas_dev);
- sas_dev->dev_type = SAS_PHY_UNUSED;
+
+ /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
+ if (!ret)
+ sas_dev->dev_type = SAS_PHY_UNUSED;
sas_dev->sas_device = NULL;
up(&hisi_hba->sem);
}
@@ -1402,7 +1413,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- bool do_port_check = !!(_sas_port != sas_port);
+ bool do_port_check = _sas_port != sas_port;
if (!sas_phy->phy->enabled)
continue;
@@ -1563,7 +1574,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
int rc;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!hisi_hba->hw->soft_reset)
@@ -2055,7 +2066,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
/* Internal abort timed out */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -2676,6 +2687,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
@@ -2687,10 +2699,11 @@ struct dentry *hisi_sas_debugfs_dir;
static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++)
- memcpy(hisi_hba->debugfs_complete_hdr[i],
+ memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
hisi_hba->complete_hdr[i],
HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
@@ -2698,13 +2711,14 @@ static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
int j;
- debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+ debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
cmd_hdr = hisi_hba->cmd_hdr[i];
for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
@@ -2715,6 +2729,7 @@ static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
const struct hisi_sas_debugfs_reg *port =
hisi_hba->hw->debugfs_reg_port;
int i, phy_cnt;
@@ -2722,7 +2737,7 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
u32 *databuf;
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
- databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+ databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
offset = port->base_off + 4 * i;
*databuf = port->read_port_reg(hisi_hba, phy_cnt,
@@ -2733,7 +2748,8 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *global =
hw->debugfs_reg_array[DEBUGFS_GLOBAL];
@@ -2745,7 +2761,8 @@ static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *axi =
hw->debugfs_reg_array[DEBUGFS_AXI];
@@ -2758,7 +2775,8 @@ static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *ras =
hw->debugfs_reg_array[DEBUGFS_RAS];
@@ -2771,8 +2789,9 @@ static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
- void *cachebuf = hisi_hba->debugfs_itct_cache;
- void *databuf = hisi_hba->debugfs_itct;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
struct hisi_sas_itct *itct;
int i;
@@ -2789,9 +2808,10 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
int max_command_entries = HISI_SAS_MAX_COMMANDS;
- void *cachebuf = hisi_hba->debugfs_iost_cache;
- void *databuf = hisi_hba->debugfs_iost;
+ void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
struct hisi_sas_iost *iost;
int i;
@@ -2842,11 +2862,12 @@ static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *global = s->private;
+ struct hisi_hba *hisi_hba = global->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
+ hisi_sas_debugfs_print_reg(global->data,
reg_global, s);
return 0;
@@ -2868,11 +2889,12 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *axi = s->private;
+ struct hisi_hba *hisi_hba = axi->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+ hisi_sas_debugfs_print_reg(axi->data,
reg_axi, s);
return 0;
@@ -2894,11 +2916,12 @@ static const struct file_operations hisi_sas_debugfs_axi_fops = {
static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *ras = s->private;
+ struct hisi_hba *hisi_hba = ras->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+ hisi_sas_debugfs_print_reg(ras->data,
reg_ras, s);
return 0;
@@ -2920,13 +2943,13 @@ static const struct file_operations hisi_sas_debugfs_ras_fops = {
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
- struct hisi_sas_phy *phy = s->private;
+ struct hisi_sas_debugfs_port *port = s->private;
+ struct hisi_sas_phy *phy = port->phy;
struct hisi_hba *hisi_hba = phy->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
- u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
- hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+ hisi_sas_debugfs_print_reg(port->data, reg_port, s);
return 0;
}
@@ -2975,13 +2998,13 @@ static void hisi_sas_show_row_32(struct seq_file *s, int index,
seq_puts(s, "\n");
}
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
+ struct hisi_sas_debugfs_cq *debugfs_cq)
{
- struct hisi_sas_cq *cq = cq_ptr;
+ struct hisi_sas_cq *cq = debugfs_cq->cq;
struct hisi_hba *hisi_hba = cq->hisi_hba;
- void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
- __le32 *complete_hdr = complete_queue +
- (hisi_hba->hw->complete_hdr_size * slot);
+ __le32 *complete_hdr = debugfs_cq->complete_hdr +
+ (hisi_hba->hw->complete_hdr_size * slot);
hisi_sas_show_row_32(s, slot,
hisi_hba->hw->complete_hdr_size,
@@ -2990,11 +3013,11 @@ static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
- struct hisi_sas_cq *cq = s->private;
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
int slot;
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_cq_show_slot(s, slot, cq);
+ hisi_sas_cq_show_slot(s, slot, debugfs_cq);
}
return 0;
}
@@ -3014,9 +3037,8 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
- struct hisi_sas_dq *dq = dq_ptr;
- struct hisi_hba *hisi_hba = dq->hisi_hba;
- void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+ struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+ void *cmd_queue = debugfs_dq->hdr;
__le32 *cmd_hdr = cmd_queue +
sizeof(struct hisi_sas_cmd_hdr) * slot;
@@ -3048,14 +3070,14 @@ static const struct file_operations hisi_sas_debugfs_dq_fops = {
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+ struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
- for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
- __le64 *iost = &debugfs_iost->qw0;
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
+ hisi_sas_show_row_64(s, i, sizeof(*iost), data);
}
return 0;
@@ -3076,9 +3098,8 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *iost_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+ struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *iost;
@@ -3117,13 +3138,13 @@ static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
int i;
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
- __le64 *itct = &debugfs_itct->qw0;
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
+ hisi_sas_show_row_64(s, i, sizeof(*itct), data);
}
return 0;
@@ -3144,9 +3165,8 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *itct_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+ struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *itct;
@@ -3184,6 +3204,8 @@ static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
+ u64 *debugfs_timestamp;
+ int dump_index = hisi_hba->debugfs_dump_index;
struct dentry *dump_dentry;
struct dentry *dentry;
char name[256];
@@ -3191,19 +3213,26 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
int c;
int d;
- /* Create dump dir inside device dir */
- dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
- hisi_hba->debugfs_dump_dentry = dump_dentry;
+ snprintf(name, 256, "%d", dump_index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
- debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
- &hisi_sas_debugfs_global_fops);
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &hisi_sas_debugfs_global_fops);
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
snprintf(name, 256, "%d", p);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_port_reg[dump_index][p],
&hisi_sas_debugfs_port_fops);
}
@@ -3212,7 +3241,8 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (c = 0; c < hisi_hba->queue_count; c++) {
snprintf(name, 256, "%d", c);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_cq[dump_index][c],
&hisi_sas_debugfs_cq_fops);
}
@@ -3221,26 +3251,33 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (d = 0; d < hisi_hba->queue_count; d++) {
snprintf(name, 256, "%d", d);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_dq[dump_index][d],
&hisi_sas_debugfs_dq_fops);
}
- debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost[dump_index],
&hisi_sas_debugfs_iost_fops);
- debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost_cache[dump_index],
&hisi_sas_debugfs_iost_cache_fops);
- debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct[dump_index],
&hisi_sas_debugfs_itct_fops);
- debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct_cache[dump_index],
&hisi_sas_debugfs_itct_cache_fops);
- debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("axi", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
&hisi_sas_debugfs_axi_fops);
- debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("ras", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
&hisi_sas_debugfs_ras_fops);
return;
@@ -3271,8 +3308,7 @@ static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
struct hisi_hba *hisi_hba = file->f_inode->i_private;
char buf[8];
- /* A bit racy, but don't care too much since it's only debugfs */
- if (hisi_hba->debugfs_snapshot)
+ if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
return -EFAULT;
if (count > 8)
@@ -3539,7 +3575,7 @@ static const struct {
int value;
char *name;
} hisi_sas_debugfs_loop_modes[] = {
- { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};
@@ -3670,132 +3706,201 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
.owner = THIS_MODULE,
};
+static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct hisi_sas_phy *phy = s->private;
+ unsigned int set_val;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, &set_val);
+ if (res)
+ return res;
+
+ if (set_val > 0)
+ return -EINVAL;
+
+ atomic_set(&phy->down_cnt, 0);
+
+ return count;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
+ .open = hisi_sas_debugfs_phy_down_cnt_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_phy_down_cnt_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
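(Editor's aside: the counter itself is presumably incremented in the phy-down handling paths of the hardware layers; those hunks are not shown here. A minimal sketch of the producer side under that assumption; note_phy_down is a hypothetical helper:)

static void note_phy_down(struct hisi_sas_phy *phy)
{
	atomic_inc(&phy->down_cnt);	/* read via debugfs phy_down_cnt/<n>; write 0 to clear */
}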
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, debugfs_work);
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
- if (hisi_hba->debugfs_snapshot)
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
return;
- hisi_hba->debugfs_snapshot = true;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
hisi_sas_debugfs_snapshot_regs(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
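(Editor's aside: in the handler above, do_div() is the kernel's 64-bit division helper: it divides its first argument in place and returns the remainder. A short sketch of the timestamp conversion, assuming local_clock() nanoseconds:)

	u64 t = local_clock();			/* ns since boot (scheduler clock) */
	u32 rem = do_div(t, NSEC_PER_MSEC);	/* t is now in ms; rem < NSEC_PER_MSEC */
	(void)rem;				/* remainder unused here */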
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
{
struct device *dev = hisi_hba->dev;
int i;
- devm_kfree(dev, hisi_hba->debugfs_iost_cache);
- devm_kfree(dev, hisi_hba->debugfs_itct_cache);
- devm_kfree(dev, hisi_hba->debugfs_iost);
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
for (i = 0; i < DEBUGFS_REGS_NUM; i++)
- devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
for (i = 0; i < hisi_hba->n_phy; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
}
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
{
const struct hisi_sas_hw *hw = hisi_hba->hw;
struct device *dev = hisi_hba->dev;
- int p, c, d;
+ int p, c, d, r, i;
size_t sz;
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+ struct hisi_sas_debugfs_regs *regs =
+ &hisi_hba->debugfs_regs[dump_index][r];
- sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
- goto fail;
+ sz = hw->debugfs_reg_array[r]->count * 4;
+ regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!regs->data)
+ goto fail;
+ regs->hisi_hba = hisi_hba;
+ }
sz = hw->debugfs_reg_port->count * 4;
for (p = 0; p < hisi_hba->n_phy; p++) {
- hisi_hba->debugfs_port_reg[p] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_port *port =
+ &hisi_hba->debugfs_port_reg[dump_index][p];
- if (!hisi_hba->debugfs_port_reg[p])
+ port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!port->data)
goto fail;
+ port->phy = &hisi_hba->phy[p];
}
- sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_AXI] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
- goto fail;
-
- sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_RAS] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
- goto fail;
-
sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
for (c = 0; c < hisi_hba->queue_count; c++) {
- hisi_hba->debugfs_complete_hdr[c] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_cq *cq =
+ &hisi_hba->debugfs_cq[dump_index][c];
- if (!hisi_hba->debugfs_complete_hdr[c])
+ cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!cq->complete_hdr)
goto fail;
+ cq->cq = &hisi_hba->cq[c];
}
sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
for (d = 0; d < hisi_hba->queue_count; d++) {
- hisi_hba->debugfs_cmd_hdr[d] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_dq *dq =
+ &hisi_hba->debugfs_dq[dump_index][d];
- if (!hisi_hba->debugfs_cmd_hdr[d])
+ dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!dq->hdr)
goto fail;
+ dq->dq = &hisi_hba->dq[d];
}
sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
- hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost)
+ hisi_hba->debugfs_iost[dump_index].iost =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost[dump_index].iost)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost_cache)
+ hisi_hba->debugfs_iost_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct_cache)
+ hisi_hba->debugfs_itct_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
goto fail;
/* New memory allocation must be located before itct */
sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct)
+ hisi_hba->debugfs_itct[dump_index].itct =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct[dump_index].itct)
goto fail;
return 0;
fail:
- hisi_sas_debugfs_release(hisi_hba);
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ hisi_sas_debugfs_release(hisi_hba, i);
return -ENOMEM;
}
+static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+ hisi_hba->debugfs_dir);
+ char name[16];
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ snprintf(name, 16, "%d", phy_no);
+ debugfs_create_file(name, 0600, dir,
+ &hisi_hba->phy[phy_no],
+ &hisi_sas_debugfs_phy_down_cnt_ops);
+ }
+}
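
The per-phy file_operations referenced here (hisi_sas_debugfs_phy_down_cnt_ops)
are defined elsewhere in this file; the 0600 mode suggests the count can also
be cleared by writing. For a purely read-only counter the usual shorthand is
DEFINE_SHOW_ATTRIBUTE(), sketched below with a hypothetical phy type:

    #include <linux/atomic.h>
    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    struct demo_phy {
            atomic_t down_cnt;
    };

    /* seq_file show routine; s->private carries the pointer passed as
     * the data argument of debugfs_create_file() */
    static int phy_down_cnt_show(struct seq_file *s, void *unused)
    {
            struct demo_phy *phy = s->private;

            seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(phy_down_cnt);  /* emits phy_down_cnt_fops */
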
+
static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
hisi_hba->debugfs_bist_dentry =
@@ -3827,6 +3932,7 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ int i;
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
@@ -3838,9 +3944,17 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
/* create bist structures */
hisi_sas_debugfs_bist_init(hisi_hba);
- if (hisi_sas_debugfs_alloc(hisi_hba)) {
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
}
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
@@ -3874,14 +3988,24 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
+u32 hisi_sas_debugfs_dump_count = 1;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
+module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
+MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
+
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
if (!hisi_sas_stt)
return -ENOMEM;
- if (hisi_sas_debugfs_enable)
+ if (hisi_sas_debugfs_enable) {
hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
+ if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
+ pr_info("hisi_sas: Limiting debugfs dump count\n");
+ hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
+ }
+ }
return 0;
}
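
Because debugfs_dump_count is registered with mode 0444 it cannot change after
module load, so clamping it once in the init path is sufficient. A standalone
sketch of the pattern, with a hypothetical cap standing in for
HISI_SAS_MAX_DEBUGFS_DUMP (whose value is not visible in this hunk):

    #include <linux/module.h>

    #define DEMO_MAX_DUMPS 32  /* hypothetical cap */

    static unsigned int dump_count = 1;
    module_param(dump_count, uint, 0444);  /* readable, never writable */
    MODULE_PARM_DESC(dump_count, "Number of dumps to allow");

    static int __init demo_init(void)
    {
            if (dump_count > DEMO_MAX_DUMPS) {
                    pr_info("demo: limiting dump count to %d\n",
                            DEMO_MAX_DUMPS);
                    dump_count = DEMO_MAX_DUMPS;
            }
            return 0;
    }
    module_init(demo_init);
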
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index b861a0f14c9d..3af53cc42bd6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -531,8 +531,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
@@ -551,6 +551,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
qw0 = le64_to_cpu(itct->qw0);
qw0 &= ~ITCT_HDR_VALID_MSK;
itct->qw0 = cpu_to_le64(qw0);
+
+ return 0;
}
static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 8e96a257e439..61b1e2693b08 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -974,13 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
int i;
sas_dev->completion = &completion;
@@ -990,13 +991,19 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
+ /* need to set register twice to clear ITCT for v2 hw */
for (i = 0; i < 2; i++) {
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
memset(itct, 0, sizeof(struct hisi_sas_itct));
}
+ return 0;
}
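
The substantive change in these hw variants is the move from
wait_for_completion() to wait_for_completion_timeout(): the ITCT-clear
interrupt may never arrive from a wedged controller, and the timed variant
returns 0 on timeout (otherwise the remaining jiffies). A sketch of the
pattern; the value of CLEAR_ITCT_TIMEOUT is defined elsewhere and not shown
in these hunks:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Returns 0 if @done was signalled within @secs seconds, else
     * -ETIMEDOUT (the IRQ handler never called complete()). */
    static int wait_hw_done(struct completion *done, unsigned int secs)
    {
            if (!wait_for_completion_timeout(done, secs * HZ))
                    return -ETIMEDOUT;
            return 0;
    }
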
static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index cb8d087762db..bf5d5f138437 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -795,13 +795,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
sas_dev->completion = &completion;
@@ -814,8 +815,14 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
+
memset(itct, 0, sizeof(struct hisi_sas_itct));
+ return 0;
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -1542,6 +1549,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct device *dev = hisi_hba->dev;
+ atomic_inc(&phy->down_cnt);
+
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
@@ -3022,11 +3031,6 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CTRL, reg_val);
- mdelay(100);
- reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
-
/* set the bist init value */
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CODE,
@@ -3035,6 +3039,11 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1,
SAS_PHY_BIST_CODE1_INIT);
+ mdelay(100);
+ reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL, reg_val);
+
/* clear error bit */
mdelay(100);
hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
@@ -3259,6 +3268,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
@@ -3292,8 +3302,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- hisi_sas_debugfs_exit(hisi_hba);
-
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
@@ -3305,6 +3313,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
}
@@ -3422,6 +3431,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
if (rc) {
scsi_remove_host(shost);
pci_disable_device(pdev);
+ return rc;
}
hisi_hba->hw->phys_init(hisi_hba);
sas_resume_ha(sha);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 55522b7162d3..1d669e47b692 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -38,6 +38,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -554,13 +555,29 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+static bool scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ int *count = data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ (*count)++;
+
+ return true;
+}
+
/**
* scsi_host_busy - Return the host busy counter
* @shost: Pointer to Scsi_Host to count in-flight commands for.
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
- return atomic_read(&shost->host_busy);
+ int cnt = 0;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_host_check_in_flight, &cnt);
+ return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
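
scsi_host_busy() previously read a shared per-host atomic that every command
had to increment and decrement; deriving the count on demand from the blk-mq
tag set removes that counter from the hot path at the cost of a walk over the
tags. The same iterator works for any blk-mq driver; a generic sketch
(returning true from the callback keeps the walk going):

    #include <linux/blk-mq.h>

    static bool count_started(struct request *rq, void *data, bool reserved)
    {
            unsigned int *count = data;

            if (blk_mq_request_started(rq))
                    (*count)++;
            return true;  /* continue iterating */
    }

    static unsigned int tagset_started(struct blk_mq_tag_set *set)
    {
            unsigned int count = 0;

            blk_mq_tagset_busy_iter(set, count_started, &count);
            return count;
    }
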
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index a929fe76102b..54b8c6f9daf4 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2354,7 +2354,6 @@ static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
{
struct iu_entry *iue = cmd->iue;
struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
- long rc = ADAPT_SUCCESS;
if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
!list_empty(&vscsi->waiting_rsp)) {
@@ -2370,7 +2369,7 @@ static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
}
- return rc;
+ return ADAPT_SUCCESS;
}
/* Called with intr lock held */
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e8bc8d328bab..f25672982c5f 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -498,7 +498,7 @@ ips_setup(char *ips_str)
int i;
char *key;
char *value;
- IPS_OPTION options[] = {
+ static const IPS_OPTION options[] = {
{"noi2o", &ips_force_i2o, 0},
{"nommap", &ips_force_memio, 0},
{"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 9e8de1462593..b1c197505579 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -147,7 +147,7 @@ static struct isci_port *sci_port_configuration_agent_find_port(
/**
*
* @controller: This is the controller object that contains the port agent
- * @port_agent: This is the port configruation agent for the controller.
+ * @port_agent: This is the port configuration agent for the controller.
*
* This routine will validate the port configuration is correct for the SCU
* hardware. The SCU hardware allows for port configurations as follows. LP0
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 49aa4e657c44..cd1e4b4d95bb 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1504,7 +1504,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
* @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port conected to this device.
+ * @port: This parameter specifies the isci_port connected to this device.
*
* pointer to new isci_remote_device.
*/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7bedbe877704..0bc63a7ab41c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 691acbdcc46d..935f98804198 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -605,6 +605,12 @@ struct lpfc_epd_pool {
spinlock_t lock; /* lock for expedite pool */
};
+enum ras_state {
+ INACTIVE,
+ REG_INPROGRESS,
+ ACTIVE
+};
+
struct lpfc_ras_fwlog {
uint8_t *fwlog_buff;
uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -621,7 +627,7 @@ struct lpfc_ras_fwlog {
bool ras_enabled; /* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
- bool ras_active; /* RAS logging running state */
+ enum ras_state state; /* RAS logging running state */
};
struct lpfc_hba {
@@ -725,6 +731,7 @@ struct lpfc_hba {
#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
@@ -830,6 +837,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
+ uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -872,7 +880,6 @@ struct lpfc_hba {
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
- uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
uint32_t cfg_delay_discovery;
@@ -990,7 +997,6 @@ struct lpfc_hba {
struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
- struct dma_pool *txrdy_payload_pool;
struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1055,6 +1061,7 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1209,6 +1216,13 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+
+ struct hlist_node cpuhp; /* used for cpuhp per hba callback */
+ struct timer_list cpuhp_poll_timer;
+ struct list_head poll_list; /* slowpath eq polling list */
+#define LPFC_POLL_HB 1 /* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
+#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
};
static inline struct Scsi_Host *
@@ -1299,6 +1313,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
+ * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
+ * @numa_mask: Pointer to phba's numa_mask member.
+ * @start: starting cpu index
+ *
+ * Note: If no valid CPU is found, nr_cpu_ids is returned.
+ *
+ **/
+static inline unsigned int
+lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+{
+ unsigned int cpu_it;
+
+ for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ if (cpu_online(cpu_it))
+ break;
+ }
+
+ return cpu_it;
+}
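
for_each_cpu_wrap() visits every set bit in the mask exactly once, starting at
@start and wrapping past the end, which makes it a natural fit for round-robin
CPU selection. A standalone sketch of the same idea:

    #include <linux/cpumask.h>

    /* Returns the first online CPU in @mask at or after @start (wrapping
     * around), or nr_cpu_ids if the mask contains no online CPU. */
    static unsigned int next_online_in_mask(const struct cpumask *mask,
                                            unsigned int start)
    {
            unsigned int cpu;

            for_each_cpu_wrap(cpu, mask, start)
                    if (cpu_online(cpu))
                            return cpu;
            return nr_cpu_ids;
    }
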
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 25aa7a53d255..4ff82b36a37a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
- unsigned long iflags = 0;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d IO %d ELS %d\n",
phba->brd_no,
@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
+
+ spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock_irqsave(&vport->phba->hbalock, iflags);
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
- rcu_read_unlock();
+ spin_unlock_irq(shost->host_lock);
if (!lport)
goto buffer_done;
@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */
+ /* host_lock is already unlocked. */
goto buffer_done;
- rcu_unlock_buf_done:
- rcu_read_unlock();
+ unlock_buf_done:
+ spin_unlock_irq(shost->host_lock);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -1475,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
int i;
msleep(100);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ return -EIO;
/* verify if privileged for the request operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1486,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ continue;
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1642,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
unsigned long val = 0;
- char *pval = 0;
+ char *pval = NULL;
int rc = 0;
if (!strncmp("enable", buff_out,
@@ -3533,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2,
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
LPFC_DELAY_INIT_LINK_INDEFINITELY,
"Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+ lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+ lpfc_pt_show, NULL);
+
/*
# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
# 1 - (1024)
@@ -3580,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
- "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -4096,7 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ /*
+ * The 'topology' is not a configurable parameter if:
+ * - persistent topology enabled
+ * - G7 adapters
+ * - G6 with no private loop support
+ */
+
+ if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -5298,7 +5331,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %02d not present\n",
phba->sli4_hba.curr_disp_cpu);
- else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+ else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5311,10 +5344,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "CPU %02d EQ %04d hdwq %04d "
+ "CPU %02d EQ None hdwq %04d "
"physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->hdwq, cpup->phys_id,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
@@ -5329,7 +5362,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5340,7 +5373,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
}
phba->sli4_hba.curr_disp_cpu++;
@@ -5711,7 +5744,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
* the driver will advertise it supports to the SCSI layer.
*
* 0 = Set nr_hw_queues by the number of CPUs or HW queues.
- * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ * 1,256 = Manually specify the nr_hw_queue value to be advertised.
*
* Value range is [0,256]. Default value is 8.
*/
@@ -5729,30 +5762,130 @@ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
* A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
* 0 = Configure the number of hdw queues to the number of active CPUs.
- * 1,128 = Manually specify how many hdw queues to use.
+ * 1,256 = Manually specify how many hdw queues to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
+static inline void
+lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+ /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ phba->cfg_irq_numa = 1;
+ else
+ phba->cfg_irq_numa = 0;
+#else
+ phba->cfg_irq_numa = 0;
+#endif
+}
+
/*
* lpfc_irq_chann: Set the number of IRQ vectors that are available
* for Hardware Queues to utilize. This also will map to the number
* of EQ / MSI-X vectors the driver will create. This should never be
* more than the number of Hardware Queues
*
- * 0 = Configure number of IRQ Channels to the number of active CPUs.
- * 1,128 = Manually specify how many IRQ Channels to use.
+ * 0 = Configure number of IRQ Channels to:
+ * if AMD architecture, number of CPUs on HBA's NUMA node
+ * otherwise, number of active CPUs.
+ * [1,256] = Manually specify how many IRQ Channels to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
-LPFC_ATTR_R(irq_chann,
- LPFC_HBA_HDWQ_DEF,
- LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
- "Set the number of I/O IRQ Channels");
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/* lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+ const struct cpumask *numa_mask;
+
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8532 use_msi = %u ignoring cfg_irq_numa\n",
+ phba->cfg_use_msi);
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return 0;
+ }
+
+ /* Check if default setting was passed */
+ if (val == LPFC_IRQ_CHANN_DEF)
+ lpfc_assign_default_irq_numa(phba);
+
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (cpumask_empty(numa_mask)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8533 Could not identify NUMA node, "
+ "ignoring cfg_irq_numa\n");
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ } else {
+ phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8543 lpfc_irq_chann set to %u "
+ "(numa)\n", phba->cfg_irq_chann);
+ }
+ } else {
+ if (val > LPFC_IRQ_CHANN_MAX) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8545 lpfc_irq_chann attribute cannot "
+ "be set to %u, allowed range is "
+ "[%u,%u]\n",
+ val,
+ LPFC_IRQ_CHANN_MIN,
+ LPFC_IRQ_CHANN_MAX);
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return -EINVAL;
+ }
+ phba->cfg_irq_chann = val;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
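
DEVICE_ATTR_RO(name) expands to a struct device_attribute named
dev_attr_<name> with mode 0444 wired to <name>_show, which is why only the
_show routine needs to be written above. A minimal standalone illustration:

    #include <linux/device.h>
    #include <linux/kernel.h>

    static ssize_t demo_value_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
            return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
    }
    static DEVICE_ATTR_RO(demo_value);  /* emits dev_attr_demo_value */
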
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5933,7 +6066,53 @@ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
* [1-4] = Multiple of 1/4th Mb of host memory for FW logging
* Value range [0..4]. Default value is 0
*/
-LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+ int ret = 0;
+ enum ras_state state;
+
+ if (!lpfc_rangecheck(val, 0, 4))
+ return -EINVAL;
+
+ if (phba->cfg_ras_fwlog_buffsize == val)
+ return 0;
+
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+ state = phba->ras_fwlog.state;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+ "registration is in progress\n");
+ return -EBUSY;
+ }
+
+ /* For disable logging: stop the logs and free the DMA.
+ * For a ras_fwlog_buffsize change, the DMA still needs to be freed and
+ * reallocated in lpfc_sli4_ras_fwlog_init.
+ */
+ phba->cfg_ras_fwlog_buffsize = val;
+ if (state == ACTIVE) {
+ lpfc_ras_stop_fwlog(phba);
+ lpfc_sli4_ras_dma_free(phba);
+ }
+
+ lpfc_sli4_ras_init(phba);
+ if (phba->ras_fwlog.ras_enabled)
+ ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ return ret;
+}
+
+lpfc_param_store(ras_fwlog_buffsize);
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
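
lpfc_param_show()/lpfc_param_store() are lpfc's attribute-generating macros;
the generated store routine parses the sysfs write and hands the value to the
_set routine above, so validation and the logging state machine stay in one
place. Roughly equivalent open-coded store logic, as a hedged sketch (the
macros' exact bodies lie outside this diff, and demo_buffsize_set() is a
hypothetical stand-in for lpfc_ras_fwlog_buffsize_set()):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    /* hypothetical backend doing the range check and reconfiguration */
    static int demo_buffsize_set(void *priv, unsigned int val)
    {
            return (val > 4) ? -EINVAL : 0;
    }

    static ssize_t demo_buffsize_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
    {
            unsigned int val;
            int rc;

            if (kstrtouint(buf, 0, &val))
                    return -EINVAL;

            rc = demo_buffsize_set(dev_get_drvdata(dev), val);
            return rc ? rc : count;  /* whole write consumed on success */
    }
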
/*
* lpfc_ras_fwlog_level: Firmware logging verbosity level
@@ -6071,8 +6250,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
- &dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
+ &dev_attr_pls,
+ &dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
@@ -7085,11 +7265,22 @@ struct fc_function_template lpfc_vport_transport_functions = {
static void
lpfc_get_hba_function_mode(struct lpfc_hba *phba)
{
- /* If it's a SkyHawk FCoE adapter */
- if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+ /* If the adapter supports FCoE mode */
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
+ case PCI_DEVICE_ID_HORNET:
+ case PCI_DEVICE_ID_TIGERSHARK:
+ case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
- else
+ break;
+ default:
+ /* for others, clear the flag */
phba->hba_flag &= ~HBA_FCOE_MODE;
+ }
}
/**
@@ -7099,6 +7290,7 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_ns_query_init(phba, lpfc_ns_query);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
@@ -7205,12 +7397,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
- lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
- lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
@@ -7256,11 +7446,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
- if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 39a736b887b1..d4e1b120cc9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5435,10 +5435,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- if (ras_fwlog->ras_active == true)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+ spin_unlock_irq(&phba->hbalock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5495,10 +5497,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- if (ras_fwlog->ras_active == false) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -ESRCH;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5509,8 +5514,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- if (ras_fwlog->ras_active)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5626,10 +5633,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- if (ras_fwlog->ras_active == true) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -EINPROGRESS;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b2ad8c750486..ee353c84a097 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -586,6 +592,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 25e86706e207..99c9bb249758 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -763,9 +763,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x x%x "
- "sz x%x\n",
+ "x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
@@ -961,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x\n",
+ "4105 NameServer Rsp Data: x%x x%x "
+ "x%x x%x sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -1025,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6450 GID_PT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1159,6 +1170,11 @@ out:
/* Link up / RSCN discovery */
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6451 GFF_ID cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
@@ -1868,6 +1884,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ /* Driver aborted this IO. No retry as error
+ * is likely Offline->Online or some adapter
+ * error. Recovery will try again.
+ */
+ break;
case IOERR_ABORT_IN_PROGRESS:
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_ILLEGAL_FRAME:
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d34be60d379..2e6a68d9ea4f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -2078,6 +2079,96 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
}
#endif
+static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+ char *buffer, int size)
+{
+ int copied = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &phba->ras_fwlog.fwlog_buff_list, list) {
+ memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
+ copied += LPFC_RAS_MAX_ENTRY_SIZE;
+ if (copied >= size)
+ break;
+ }
+ return copied;
+}
+
+static int
+lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates a buffer large enough
+ * for a snapshot of the RAS log, copies the in-memory log into it, and then
+ * stores a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ rc = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+ debug->buffer = vmalloc(size);
+ if (!debug->buffer)
+ goto free_debug;
+
+ debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size);
+ if (debug->len < 0) {
+ rc = -EINVAL;
+ goto free_buffer;
+ }
+ file->private_data = debug;
+
+ return 0;
+
+free_buffer:
+ vfree(debug->buffer);
+free_debug:
+ kfree(debug);
+out:
+ return rc;
+}
+
/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
@@ -5286,6 +5377,16 @@ static const struct file_operations lpfc_debugfs_op_lockstat = {
};
#endif
+#undef lpfc_debugfs_ras_log
+static const struct file_operations lpfc_debugfs_ras_log = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_ras_log_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_ras_log_release,
+};
+#endif
+
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
.owner = THIS_MODULE,
@@ -5457,7 +5558,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.release = lpfc_idiag_cmd_release,
};
-#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
* @phba: Pointer to HBA context object.
@@ -5707,6 +5807,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* RAS log */
+ snprintf(name, sizeof(name), "ras_log");
+ phba->debug_ras_log =
+ debugfs_create_file(name, 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
+ if (!phba->debug_ras_log) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6148 Cannot create debugfs"
+ " ras_log\n");
+ goto debug_failed;
+ }
+
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
@@ -6117,6 +6230,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_ras_log);
+ phba->debug_ras_log = NULL;
+
#ifdef LPFC_HDWQ_LOCK_STAT
debugfs_remove(phba->debug_lockstat); /* lockstat */
phba->debug_lockstat = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d5303994bfd6..42a2bf38eaea 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ char *mode;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* If we don't send GFT_ID to Fabric, a PRLI error
+ * could be expected.
+ */
+ if ((vport->fc_flag & FC_FABRIC) ||
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ mode = KERN_ERR;
+ else
+ mode = KERN_INFO;
+
/* PRLI failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
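
KERN_ERR and KERN_INFO are string-literal prefixes, which is what lets the
severity be picked into a local variable at runtime and passed through the
vlog macro. With the generic device helpers the same trick uses dev_printk(),
which takes the level as a separate argument (illustrative sketch):

    #include <linux/device.h>

    static void report_prli_result(struct device *dev, bool expected, int err)
    {
            /* choose severity at runtime; both are string constants */
            const char *level = expected ? KERN_INFO : KERN_ERR;

            dev_printk(level, dev, "PRLI completed with status %d\n", err);
    }
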
@@ -4291,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "3177 ELS response failed\n");
+ goto out;
+ }
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -4430,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -5260,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
}
}
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6452 Discover PLOGI %d flag x%x\n",
+ sentplogi, vport->fc_flag);
+
if (sentplogi) {
lpfc_set_disctmo(vport);
}
@@ -6455,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
- int i;
+ int i, tmo;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6561,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
+
+ /* Restart disctmo if its already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies + msecs_to_jiffies(1000 * tmo));
+ }
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
@@ -6663,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
- vport->port_state);
+ vport->port_state, vport->num_disc_nodes,
+ vport->gidft_inp);
/* To process RSCN, first compare RSCN data with NameServer */
vport->fc_ns_retry = 0;
@@ -7986,20 +8014,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
+ unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
+
/*
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
* ultimately grabs the ring_lock, the driver must splice the list into
* a working list and release the locks before calling the abort.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring = lpfc_phba_elsring(phba);
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
if (unlikely(!pring)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -8014,6 +8044,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
+ if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
@@ -8037,21 +8070,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@@ -8091,7 +8124,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
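
The lock-variant change is the point of this hunk: spin_lock_irq()
unconditionally re-enables interrupts on unlock, so it is only correct when
the caller is known to run with interrupts on, whereas the irqsave form
preserves whatever interrupt state the caller had. The general pattern:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_touch_shared_state(void)
    {
            unsigned long flags;

            /* safe from any context: caller's IRQ state is restored */
            spin_lock_irqsave(&demo_lock, flags);
            /* ... splice/manipulate shared lists here ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
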
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 749286acdc17..85ada3deb47d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- if (phba->link_state >= LPFC_LINK_UP ||
+ /* The driver could have an abort request completed in the queue
+ * when the link goes down. Allow for this transition.
+ */
+ if (phba->link_state >= LPFC_LINK_DOWN ||
phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -1135,7 +1138,6 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1162,17 +1164,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI) {
- if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
- bbscn = bf_get(lpfc_bbscn_def,
- &phba->sli4_hba.bbscn_params);
- vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
- vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
- }
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
- } else if (vport->fc_flag & FC_PT2PT) {
+ else if (vport->fc_flag & FC_PT2PT)
lpfc_disc_start(vport);
- }
+
return;
out:
@@ -3456,8 +3452,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1313 Link Down UNEXP WWPN Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "1313 Link Down Unexpected FA WWPN Event x%x "
+ "received Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
@@ -4046,7 +4042,7 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -4575,8 +4571,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp;
free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_free_rpi(vport->phba, rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
return NULL;
}
@@ -4835,12 +4833,51 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+
+ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (kref_read(&ndlp->kref) > 0)) {
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else {
+ if (vport->load_flag & FC_UNLOADING) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ lpfc_nlp_get(ndlp);
+ }
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+}
+
+/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
* a LPFC_NODELIST entry. It is also called if the driver initiates a
@@ -4860,7 +4897,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
@@ -4871,7 +4909,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* no need to queue up another one.
*/
if (ndlp->nlp_flag & NLP_UNREG_INP) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
"Data: x%px\n",
@@ -4890,39 +4929,19 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
- (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) >=
- LPFC_SLI_INTF_IF_TYPE_2) &&
- (kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->mbox_cmpl =
- lpfc_sli4_unreg_rpi_cmpl_clr;
- /*
- * accept PLOGIs after unreg_rpi_cmpl
- */
- acc_plogi = 0;
- } else if (vport->load_flag & FC_UNLOADING) {
- mbox->ctx_ndlp = NULL;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- } else {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- }
- }
+ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+ /*
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
ndlp->nlp_flag |= NLP_UNREG_INP;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
"NPort x%x deferred flg x%x "
"Data:x%px\n",
@@ -5057,6 +5076,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5138,8 +5158,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- lpfc_unreg_rpi(vport, ndlp);
-
+ if (!lpfc_unreg_rpi(vport, ndlp)) {
+ /* Clean up unregistered and not-yet-freed RPIs */
+ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
+ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
+ lpfc_sli4_free_rpi(vport->phba,
+ ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ }
return 0;
}
@@ -5165,8 +5197,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
+ "ref %d map:x%x ndlp x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5203,8 +5237,9 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0940 removed node x%px DID x%x "
- " rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->rport);
+ "rpi %d rport not null x%px\n",
+ ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
rdata->pnode = NULL;
@@ -5362,6 +5397,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6453 Setup New Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5375,6 +5417,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
"0014 Could not enable ndlp\n");
return NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6454 Setup Enabled Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5394,6 +5442,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6455 Setup RSCN Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* NVME Target mode waits until rport is known to be
* impacted by the RSCN before it transitions. No
* active management - just go to NPR provided the
@@ -5405,15 +5459,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6456 Skip Setup RSCN Node x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
+ }
} else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6457 Setup Active Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* If the initiator received a PLOGI from this NPort or if the
* initiator is already in the process of discovery on it,
* there's no need to try to discover it again.
@@ -5565,10 +5636,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0202 Start Discovery hba state x%x "
- "Data: x%x x%x x%x\n",
+ "0202 Start Discovery port state x%x "
+ "flg x%x Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ vport->fc_adisc_cnt, vport->fc_npr_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5996,7 +6067,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -6185,12 +6256,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+ "flg:x%x refcnt:%d map:x%x\n",
+ ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map);
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6419,7 +6490,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2624 RPI %x DID %x flag %x "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bd533475c86a..25cdcbc2b02f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -210,7 +210,6 @@ struct lpfc_sli_intf {
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 0
-#define LPFC_IMAX_THRESHOLD 1000
#define LPFC_MAX_AUTO_EQ_DELAY 120
#define LPFC_EQ_DELAY_STEP 15
#define LPFC_EQD_ISR_TRIGGER 20000
@@ -2320,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -2809,6 +2809,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_trunk_SHIFT 12
#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F
#define lpfc_mbx_rd_conf_trunk_WORD word2
+#define lpfc_mbx_rd_conf_pt_SHIFT 20
+#define lpfc_mbx_rd_conf_pt_MASK 0x00000003
+#define lpfc_mbx_rd_conf_pt_WORD word2
+#define lpfc_mbx_rd_conf_tf_SHIFT 22
+#define lpfc_mbx_rd_conf_tf_MASK 0x00000001
+#define lpfc_mbx_rd_conf_tf_WORD word2
+#define lpfc_mbx_rd_conf_ptv_SHIFT 23
+#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_ptv_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -3479,6 +3488,9 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
@@ -3518,6 +3530,7 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x11
+#define LPFC_SET_DUAL_DUMP 0x1e
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3532,6 +3545,15 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_dd_SHIFT 0
+#define lpfc_mbx_set_feature_dd_MASK 0x00000001
+#define lpfc_mbx_set_feature_dd_WORD word6
+#define lpfc_mbx_set_feature_ddquery_SHIFT 1
+#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001
+#define lpfc_mbx_set_feature_ddquery_WORD word6
+#define LPFC_DISABLE_DUAL_DUMP 0
+#define LPFC_ENABLE_DUAL_DUMP 1
+#define LPFC_QUERY_OP_DUAL_DUMP 2
uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -4261,6 +4283,8 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
+#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
};
/*
@@ -4659,6 +4683,7 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
+#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4807,8 +4832,8 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
-#define MAGIC_NUMER_G6 0xFEAA0003
-#define MAGIC_NUMER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G6 0xFEAA0003
+#define MAGIC_NUMBER_G7 0xFEAA0005
struct lpfc_grp_hdr {
uint32_t size;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e8813d26e594..6298b1729098 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -40,6 +40,8 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -66,9 +68,13 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
+static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -1235,10 +1241,9 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
struct lpfc_hba, eq_delay_work);
struct lpfc_eq_intr_info *eqi, *eqi_new;
struct lpfc_queue *eq, *eq_next;
- unsigned char *eqcnt = NULL;
+ unsigned char *ena_delay = NULL;
uint32_t usdelay;
int i;
- bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1247,44 +1252,36 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
phba->pport->fc_flag & FC_OFFLINE_MODE)
goto requeue;
- eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
- GFP_KERNEL);
- if (!eqcnt)
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+ GFP_KERNEL);
+ if (!ena_delay)
goto requeue;
- if (phba->cfg_irq_chann > 1) {
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (!eq)
- continue;
- if (eq->q_mode) {
- update = true;
- break;
- }
- if (eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+ ena_delay[eq->last_cpu] = 1;
}
- } else
- update = true;
+ }
for_each_present_cpu(i) {
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
- if (!update && eqcnt[i] < 2) {
- eqi->icnt = 0;
- continue;
+ if (ena_delay[i]) {
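+ /* eqi->icnt counts interrupts since the last pass;
+ * >>10 scales per ~1024 interrupts, approximating the
+ * old per-1000 (LPFC_IMAX_THRESHOLD) division.
+ */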
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ } else {
+ usdelay = 0;
}
- usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
- LPFC_EQ_DELAY_STEP;
- if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
- usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
eqi->icnt = 0;
list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
- if (eq->last_cpu != i) {
+ if (unlikely(eq->last_cpu != i)) {
eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
eq->last_cpu);
list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1296,7 +1293,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
}
}
- kfree(eqcnt);
+ kfree(ena_delay);
requeue:
queue_delayed_work(phba->wq, &phba->eq_delay_work,
@@ -3053,11 +3050,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
continue;
}
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0009 Assign RPI x%x to ndlp x%px "
+ "DID:x%06x flg:x%x map:x%x\n",
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3387,6 +3385,8 @@ lpfc_online(struct lpfc_hba *phba)
if (phba->cfg_xri_rebalancing)
lpfc_create_multixri_pools(phba);
+ lpfc_cpuhp_add(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3453,10 +3453,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* Driver must assume RPI is invalid for
+ * any unused or inactive node.
+ */
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
@@ -3472,16 +3477,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport,
- KERN_INFO, LOG_NODE,
- "0011 lpfc_offline: "
- "ndlp:x%px did %x "
- "usgmap:x%x rpi:%x\n",
- ndlp, ndlp->nlp_DID,
- ndlp->nlp_usg_map,
- ndlp->nlp_rpi);
-
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0011 Free RPI x%x on "
+ "ndlp:x%px did x%x "
+ "usgmap:x%x\n",
+ ndlp->nlp_rpi, ndlp,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -3545,6 +3550,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -5283,10 +5289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
+ "x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- evt_type);
+ acqe_sli->reserved, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5433,11 +5439,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
+ /* Misconfigured WWN. Reports that the SLI Port is configured
+ * to use FA-WWN, but the attached device doesn't support it.
+ * No driver action is required.
+ * Event Data1 - N/A, Event Data2 - N/A
+ */
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ "2699 Misconfigured FA-WWN - Attached device does "
+ "not support FA-WWN\n");
+ break;
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
+ /* EEPROM failure. No driver action is required */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2518 EEPROM failure - "
+ "Event Data1: x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
- acqe_sli->event_data1, acqe_sli->event_data2,
+ "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
break;
}
@@ -5976,6 +5997,29 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
+ * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node
+ * @phba: Pointer to HBA context object.
+ *
+ * Builds the cpumask of all CPUs on the adapter's NUMA node; the mask is
+ * left empty when the device reports no NUMA affinity.
+ **/
+static void
+lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, numa_node;
+ struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
+
+ cpumask_clear(numa_mask);
+
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE)
+ return;
+
+ for_each_possible_cpu(cpu)
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, numa_mask);
+}
+
+/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6416,8 +6460,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
u32 if_fam;
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
- phba->sli4_hba.num_possible_cpu = num_possible_cpus();
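+ /* Size by the highest possible CPU id + 1 rather than the count
+ * of possible CPUs, since cpu_possible_mask may be sparse and the
+ * per-cpu map arrays are indexed by CPU id.
+ */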
+ phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
+ lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6953,6 +6998,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
+ cpumask_clear(&phba->sli4_hba.numa_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -7126,7 +7172,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, iocb_count);
goto out_free_iocbq;
}
@@ -7545,18 +7591,10 @@ lpfc_create_shost(struct lpfc_hba *phba)
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
- if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = dma_pool_create(
- "txrdy_pool", &phba->pcidev->dev,
- TXRDY_PAYLOAD_LEN, 16, 0);
- if (phba->txrdy_payload_pool) {
- phba->targetport = NULL;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
- lpfc_printf_log(phba, KERN_INFO,
- LOG_INIT | LOG_NVME_DISC,
- "6076 NVME Target Found\n");
- }
- }
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
}
lpfc_debugfs_initialize(vport);
@@ -8235,6 +8273,94 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
+static const char * const lpfc_topo_to_str[] = {
+ "Loop then P2P",
+ "Loopback",
+ "P2P Only",
+ "Unsupported",
+ "Loop Only",
+ "Unsupported",
+ "P2P then Loop",
+};
+
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rdconf: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK
+ *
+ **/
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
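+/* Decision summary for the switch below:
+ *   G7: P2P only; tf set or pt == loop marks the FW values invalid
+ *   G6: tf must be clear; pt then selects fixed loop vs fixed P2P
+ *   G5: tf set selects a failover mode (pt picks which is tried first);
+ *       tf clear selects fixed loop or fixed P2P
+ */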
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+ u8 ptv, tf, pt;
+
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
+ ptv, tf, pt);
+ if (!ptv) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2019 FW does not support persistent topology "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ return;
+ }
+ /* FW supports persistent topology - override module parameter value */
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ if (tf || (pt == LINK_FLAGS_LOOP)) {
+ /* Invalid values from FW - use driver params */
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ } else {
+ /* Prism only supports PT2PT topology */
+ phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+ }
+ break;
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ if (!tf) {
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+ ? FLAGS_TOPOLOGY_MODE_LOOP
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
+ } else {
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ }
+ break;
+ default: /* G5 */
+ if (tf) {
+ /* If topology failover set - pt is '0' or '1' */
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
+ } else {
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
+ : FLAGS_TOPOLOGY_MODE_LOOP);
+ }
+ break;
+ }
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2020 Using persistent topology value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2021 Invalid topology values from FW "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ }
+}
+
/**
* lpfc_sli4_read_config - Get the config parameters.
* @phba: pointer to lpfc hba data structure.
@@ -8346,6 +8472,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+ lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
"XRI(B:%d M:%d), "
@@ -8619,8 +8746,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
*/
if (phba->nvmet_support) {
- if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
@@ -9160,6 +9287,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_cleanup_poll_list(phba);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -10581,7 +10710,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
*/
if ((match == LPFC_FIND_BY_EQ) &&
(cpup->flag & LPFC_CPU_FIRST_IRQ) &&
- (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
@@ -10619,6 +10747,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
}
#endif
+/**
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
+ * @phba: pointer to lpfc hba data structure.
+ * @eqidx: index for eq and irq vector
+ * @flag: flags to set for vector_map structure
+ * @cpu: cpu used to index vector_map structure
+ *
+ * The routine assigns eq info into vector_map structure
+ */
+static inline void
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
+ unsigned int cpu)
+{
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
+
+ cpup->eq = eqidx;
+ cpup->flag |= flag;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
+}
+
+/**
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the cpu_map array structure
+ */
+static void
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_eq_intr_info *eqi;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+}
+
+/**
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the hba_eq_hdl array structure
+ */
+static void
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_hba_eq_hdl *eqhdl;
+ int i;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eqhdl = lpfc_get_eq_hdl(i);
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->phba = phba;
+ }
+}
+
/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
@@ -10637,22 +10834,10 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
- const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- /* Init cpu_map array */
- for_each_possible_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
- cpup->eq = LPFC_VECTOR_MAP_EMPTY;
- cpup->irq = LPFC_VECTOR_MAP_EMPTY;
- cpup->flag = 0;
- }
-
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
@@ -10688,65 +10873,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
min_core_id = cpup->core_id;
}
- for_each_possible_cpu(i) {
- struct lpfc_eq_intr_info *eqi =
- per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- INIT_LIST_HEAD(&eqi->list);
- eqi->icnt = 0;
- }
-
- /* This loop sets up all CPUs that are affinitized with a
- * irq vector assigned to the driver. All affinitized CPUs
- * will get a link to that vectors IRQ and EQ.
- *
- * NULL affinity mask handling:
- * If irq count is greater than one, log an error message.
- * If the null mask is received for the first irq, find the
- * first present cpu, and assign the eq index to ensure at
- * least one EQ is assigned.
- */
- for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- /* Get a CPU mask for all CPUs affinitized to this vector */
- maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp) {
- if (phba->cfg_irq_chann > 1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 No affinity mask found "
- "for vector %d (%d)\n",
- idx, phba->cfg_irq_chann);
- if (!idx) {
- cpu = cpumask_first(cpu_present_mask);
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- }
- break;
- }
-
- i = 0;
- /* Loop through all CPUs associated with vector idx */
- for_each_cpu_and(cpu, maskp, cpu_present_mask) {
- /* Set the EQ index and IRQ for that vector */
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
-
- /* If this is the first CPU thats assigned to this
- * vector, set LPFC_CPU_FIRST_IRQ.
- */
- if (!i)
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- i++;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d flag x%x\n",
- cpu, cpup->irq, cpup->eq, cpup->flag);
- }
- }
-
/* After looking at each irq vector assigned to this pcidev, its
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
@@ -10772,7 +10898,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
new_cpu = cpumask_next(
@@ -10785,7 +10911,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
found_same:
/* We found a matching phys_id, so copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10797,9 +10922,10 @@ found_same:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
- "irq %d from id %d same "
+ "eq %d from peer cpu %d same "
"phys_id (%d)\n",
- cpu, cpup->irq, new_cpu, cpup->phys_id);
+ cpu, cpup->eq, new_cpu,
+ cpup->phys_id);
}
}
@@ -10823,7 +10949,7 @@ found_same:
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
@@ -10833,13 +10959,12 @@ found_same:
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3339 Set Affinity: CPU %d "
- "irq %d UNASSIGNED\n",
- cpup->hdwq, cpup->irq);
+ "eq %d UNASSIGNED\n",
+ cpup->hdwq, cpup->eq);
continue;
found_any:
/* We found an available entry, copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10851,8 +10976,8 @@ found_any:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
- "irq %d from id %d (%d/%d)\n",
- cpu, cpup->irq, new_cpu,
+ "eq %d from peer cpu %d (%d/%d)\n",
+ cpu, cpup->eq, new_cpu,
new_cpup->phys_id, new_cpup->core_id);
}
}
@@ -10873,11 +10998,11 @@ found_any:
idx++;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
}
- /* Finally we need to associate a hdwq with each cpu_map entry
+ /* Associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are fewer
* hardware queues than CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
@@ -10951,9 +11076,26 @@ found_any:
logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+
+ /*
+ * Initialize the cpu_map slots for not-present cpus in case
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
+ */
+ idx = 0;
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3340 Set Affinity: not present "
+ "CPU %d hdwq %d\n",
+ cpu, cpup->hdwq);
}
/* The cpu_map array will be used later during initialization
@@ -10963,11 +11105,280 @@ found_any:
}
/**
+ * lpfc_cpuhp_get_eq - get eqs that must be polled when @cpu goes offline
+ *
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist: list to collect the eqs whose irq vector will be shut down
+ */
+static void
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+ struct list_head *eqlist)
+{
+ const struct cpumask *maskp;
+ struct lpfc_queue *eq;
+ cpumask_t tmp;
+ u16 idx;
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+ /*
+ * if irq is not affinitized to the cpu going
+ * offline then we don't need to poll the eq
+ * attached to it.
+ */
+ if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ continue;
+ /* Get the cpus that are online and are
+ * affinitized to this irq vector. If the
+ * count is more than one, cpuhp is not going
+ * to shut down this vector. Since this cpu
+ * has not gone offline yet, we need >1.
+ */
+ cpumask_and(&tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(&tmp) > 1)
+ continue;
+
+ /* Now that we have an irq to shutdown, get the eq
+ * mapped to this irq. Note: multiple hdwq's in
+ * the software can share an eq, but eventually
+ * only 1 eq will be mapped to this vector
+ */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ list_add(&eq->_poll_list, eqlist);
+ }
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+ /*
+ * unregistering the instance doesn't stop the polling
+ * timer. Wait for the poll timer to retire.
+ */
+ synchronize_rcu();
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ __lpfc_cpuhp_remove(phba);
+}
+
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
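+ /* This read-side critical section pairs with the synchronize_rcu()
+ * in __lpfc_cpuhp_remove(), so the poll timer is not re-armed while
+ * an unregister is retiring it.
+ */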
+ rcu_read_lock();
+
+ if (!list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ rcu_read_unlock();
+
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+}
+
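+/* Common guard for the cpuhp callbacks: returns true, with *retval set,
+ * when the event must be ignored (unloading or not SLI4); returns false
+ * to proceed with the hotplug handling.
+ */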
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
+{
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ *retval = -EAGAIN;
+ return true;
+ }
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ *retval = 0;
+ return true;
+ }
+
+ /* proceed with the hotplug */
+ return false;
+}
+
+/**
+ * lpfc_irq_set_aff - set IRQ affinity
+ * @eqhdl: EQ handle
+ * @cpu: cpu to set affinity
+ *
+ **/
+static inline void
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_clear_aff - clear IRQ affinity
+ * @eqhdl: EQ handle
+ *
+ **/
+static inline void
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
+ * @phba: pointer to HBA context object.
+ * @cpu: cpu going offline/online
+ * @offline: true, cpu is going offline. false, cpu is coming online.
+ *
+ * If cpu is going offline, a best effort is made to find the next
+ * online cpu on the phba's NUMA node and migrate all offlining IRQ
+ * affinities.
+ *
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
+ *
+ * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
+ *
+ **/
+static void
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct cpumask *aff_mask;
+ unsigned int cpu_select, cpu_next, idx;
+ const struct cpumask *numa_mask;
+
+ if (!phba->cfg_irq_numa)
+ return;
+
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (!cpumask_test_cpu(cpu, numa_mask))
+ return;
+
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ return;
+
+ if (offline) {
+ /* Find next online CPU on NUMA node */
+ cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+
+ /* Found a valid CPU */
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
+ /* Go through each eqhdl and ensure offlining
+ * cpu aff_mask is migrated
+ */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ aff_mask = lpfc_get_aff_mask(idx);
+
+ /* Migrate affinity */
+ if (cpumask_test_cpu(cpu, aff_mask))
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
+ cpu_select);
+ }
+ } else {
+ /* Rely on irqbalance if no online CPUs left on NUMA */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
+ }
+ } else {
+ /* Migrate affinity back to this CPU */
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
+ }
+}
+
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ LIST_HEAD(eqlist);
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, true);
+
+ lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+
+ /* start polling on these eq's */
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
+ list_del_init(&eq->_poll_list);
+ lpfc_sli4_start_polling(eq);
+ }
+
+ return 0;
+}
+
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ unsigned int n;
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, false);
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
+ if (n == cpu)
+ lpfc_sli4_stop_polling(eq);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
+ * to cpus on the system.
+ *
+ * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
+ * the number of cpus on the same numa node as this adapter. The vectors are
+ * allocated without requesting OS affinity mapping. A vector will be
+ * allocated and assigned to each online and offline cpu. If the cpu is
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
+ * affinity will be set to the nearest peer cpu within the numa node that is
+ * online. If there are no online cpus within the numa node, affinity is not
+ * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
+ * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
+ * configured.
+ *
+ * If numa mode is not enabled and there is more than 1 vector allocated, then
+ * the driver relies on the managed irq interface, where the OS assigns the
+ * vector-to-cpu affinity. The driver will then use that affinity mapping to
+ * set up its cpu mapping table.
*
* Return codes
* 0 - successful
@@ -10978,13 +11389,31 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
+ const struct cpumask *numa_mask = NULL;
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_hba_eq_hdl *eqhdl;
+ const struct cpumask *maskp;
+ bool first;
+ unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- rc = pci_alloc_irq_vectors(phba->pcidev,
- 1,
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+ cpu_cnt = cpumask_weight(numa_mask);
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
+
+ /* cpu: iterates over numa_mask including offline or online
+ * cpu_select: iterates over online numa_mask to set affinity
+ */
+ cpu = cpumask_first(numa_mask);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else {
+ flags |= PCI_IRQ_AFFINITY;
+ }
+
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -10994,23 +11423,61 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ eqhdl = lpfc_get_eq_hdl(index);
+ name = eqhdl->handler_name;
memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl->idx = index;
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
"request_irq failed (%d)\n", index, rc);
goto cfg_fail_out;
}
+
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
+
+ if (phba->cfg_irq_numa) {
+ /* If found a neighboring online cpu, set affinity */
+ if (cpu_select < nr_cpu_ids)
+ lpfc_irq_set_aff(eqhdl, cpu_select);
+
+ /* Assign EQ to cpu_map */
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+
+ /* Iterate to next offline or online cpu in numa_mask */
+ cpu = cpumask_next(cpu, numa_mask);
+
+ /* Find next online cpu in numa_mask to set affinity */
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else if (vectors == 1) {
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
+ cpu);
+ } else {
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
+
+ first = true;
+ /* Loop through all CPUs associated with vector index */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ /* If this is the first CPU that's assigned to
+ * this vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ lpfc_assign_eq_map_info(phba, index,
+ first ?
+ LPFC_CPU_FIRST_IRQ : 0,
+ cpu);
+ if (first)
+ first = false;
+ }
+ }
}
if (vectors != phba->cfg_irq_chann) {
@@ -11020,17 +11487,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
- phba->cfg_nvmet_mrq = vectors;
}
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ for (--index; index >= 0; index--) {
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
+ }
/* Unconfigure MSI-X capability structure */
pci_free_irq_vectors(phba->pcidev);
@@ -11057,6 +11525,8 @@ static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
+ unsigned int cpu;
+ struct lpfc_hba_eq_hdl *eqhdl;
rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
@@ -11078,9 +11548,15 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
+
for (index = 0; index < phba->cfg_irq_chann; index++) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl = lpfc_get_eq_hdl(index);
+ eqhdl->idx = index;
}
return 0;
@@ -11138,15 +11614,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
struct lpfc_hba_eq_hdl *eqhdl;
+ unsigned int cpu;
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
+ cpu);
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl = lpfc_get_eq_hdl(idx);
eqhdl->idx = idx;
- eqhdl->phba = phba;
}
}
}
@@ -11168,14 +11650,14 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX) {
int index;
+ struct lpfc_hba_eq_hdl *eqhdl;
/* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_irq_chann; index++) {
- irq_set_affinity_hint(
- pci_irq_vector(phba->pcidev, index),
- NULL);
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
}
} else {
free_irq(phba->pcidev->irq, phba);
@@ -11367,6 +11849,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Wait for completion of device XRI exchange busy */
lpfc_sli4_xri_exchange_busy_wait(phba);
+ /* per-phba callback de-registration for hotplug event */
+ lpfc_cpuhp_remove(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -11538,6 +12023,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -11589,13 +12075,10 @@ fcponly:
}
/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
+ * accommodate 512K and 1M IOs in a single nvme buf.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
@@ -12312,35 +12795,57 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
}
-static void
+static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
+ int rc;
+
+ /* Three cases: (1) FW was not supported on the detected adapter.
+ * (2) FW update has been locked out administratively.
+ * (3) Some other error during FW update.
+ * In each case, an unmaskable message is written to the console
+ * for admin diagnosis.
+ */
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- magic_number != MAGIC_NUMER_G6) ||
+ magic_number != MAGIC_NUMBER_G6) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMER_G7))
+ magic_number != MAGIC_NUMBER_G7)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3030 This firmware version is not supported on "
- "this HBA model. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
- else
+ "3030 This firmware version is not supported on"
+ " this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EINVAL;
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
+ "3021 Firmware downloads have been prohibited "
+ "by a system configuration setting on "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EACCES;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 FW Download failed. Add Status x%x "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ offset, phba->pcidev->device, magic_number,
+ ftype, fid, fsize, fw->size);
+ rc = -EIO;
+ }
+ return rc;
}
-
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
- * @phba: pointer to lpfc hba data structure.
+ * @context: opaque context - pointer to lpfc hba data structure.
*
**/
static void
@@ -12409,8 +12914,12 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
if (rc) {
- lpfc_log_write_firmware_error(phba, offset,
- magic_number, ftype, fid, fsize, fw);
+ rc = lpfc_log_write_firmware_error(phba, offset,
+ magic_number,
+ ftype,
+ fid,
+ fsize,
+ fw);
goto release_out;
}
}
@@ -12430,9 +12939,12 @@ release_out:
}
release_firmware(fw);
out:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.\n", rc);
- return;
+ if (rc < 0)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3062 Firmware update error, status %d.\n", rc);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update success: size %d.\n", rc);
}
/**
@@ -12551,6 +13063,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->pport = NULL;
lpfc_stop_port(phba);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -12632,6 +13150,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
+ INIT_LIST_HEAD(&phba->poll_list);
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+
return 0;
out_free_sysfs_attr:
@@ -13344,8 +13865,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
phba->cfg_fof = 1;
} else {
phba->cfg_fof = 0;
- if (phba->device_data_mem_pool)
- mempool_destroy(phba->device_data_mem_pool);
+ mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
}
@@ -13450,11 +13970,24 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
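+ /* CPUHP_AP_ONLINE_DYN hands back a dynamically allocated hotplug
+ * state; keep it so per-phba instances can be added in
+ * lpfc_cpuhp_add() and removed in __lpfc_cpuhp_remove().
+ */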
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "lpfc/sli4:online",
+ lpfc_cpu_online, lpfc_cpu_offline);
+ if (error < 0)
+ goto cpuhp_failure;
+ lpfc_cpuhp_state = error;
+
error = pci_register_driver(&lpfc_driver);
- if (error) {
- fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
- }
+ if (error)
+ goto unwind;
+
+ return error;
+
+unwind:
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
+cpuhp_failure:
+ fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
return error;
}
@@ -13471,6 +14004,7 @@ lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
idr_destroy(&lpfc_hba_index);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index ea10f03437f5..148d02a27b58 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -46,6 +46,23 @@
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+/* generate message by verbose log setting or severity */
+#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
+{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
+
+#define lpfc_log_msg(phba, level, mask, fmt, arg...) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
+
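+/* Unlike lpfc_printf_vlog below, which requires severity <= '3' (KERN_ERR)
+ * to bypass the verbose mask, the two macros above also pass KERN_WARNING
+ * ('4'). Illustrative call, matching the usage added in lpfc_init.c:
+ *
+ *   lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ *                "2699 Misconfigured FA-WWN - Attached device does "
+ *                "not support FA-WWN\n");
+ */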
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8abe933bad09..d1773c01d2b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba,
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ !(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ae09bb863497..7082279e4c01 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -230,9 +230,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- dma_pool_destroy(phba->txrdy_payload_pool);
- phba->txrdy_payload_pool = NULL;
-
dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index fc6e4546d738..ae4359013846 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -279,6 +279,55 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
+/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+ * @phba: pointer to lpfc hba data structure.
+ * @link_mbox: pointer to CONFIG_LINK mailbox object
+ *
+ * This routine is only called if we are SLI3, in direct connect pt2pt
+ * mode and the remote NPort issues the PLOGI after link up.
+ */
+static void
+lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+{
+ LPFC_MBOXQ_t *login_mbox;
+ MAILBOX_t *mb = &link_mbox->u.mb;
+ struct lpfc_iocbq *save_iocb;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ ndlp = link_mbox->ctx_ndlp;
+ login_mbox = link_mbox->context3;
+ save_iocb = login_mbox->context3;
+ link_mbox->context3 = NULL;
+ login_mbox->context3 = NULL;
+
+ /* Check for CONFIG_LINK error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+ mb->mbxStatus);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ return;
+ }
+
+ /* Now that CONFIG_LINK completed, and our SID is configured,
+ * we can now proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4576 PLOGI ACC fails pt2pt discovery: %x\n",
+ rc);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ }
+
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+}
+
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -291,10 +340,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
- LPFC_MBOXQ_t *mbox;
+ LPFC_MBOXQ_t *link_mbox;
+ LPFC_MBOXQ_t *login_mbox;
+ struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- int rc;
+ int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -343,6 +394,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
+ defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -354,7 +406,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
-
/* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
@@ -396,6 +447,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ login_mbox = NULL;
+ link_mbox = NULL;
+ save_iocb = NULL;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -423,17 +478,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ defer_acc = 1;
+ link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!link_mbox)
goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_config_link(phba, link_mbox);
+ link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+
+ save_iocb = lpfc_sli_get_iocbq(phba);
+ if (!save_iocb)
goto out;
- }
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
}
lpfc_can_disctmo(vport);
@@ -448,8 +508,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!login_mbox)
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
@@ -457,21 +517,19 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ if (rc)
goto out;
- }
/* ACC PLOGI rsp command needs to execute first,
- * queue this mbox command to be processed later.
+ * queue this login_mbox command to be processed later.
*/
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
/*
- * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+ * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
- mbox->vport = vport;
+ login_mbox->vport = vport;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
spin_unlock_irq(shost->host_lock);
@@ -484,8 +542,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -504,16 +564,47 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, mbox);
+ ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ if (defer_acc) {
+ /* So the order here should be:
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ * Issue REG_LOGIN mbox
+ */
+
+ /* Save the REG_LOGIN mbox and the rcv IOCB copy for later use */
+ link_mbox->context3 = login_mbox;
+ login_mbox->context3 = save_iocb;
+
+ /* Start the ball rolling by issuing CONFIG_LINK here */
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
return 1;
}
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
+ if (defer_acc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4577 pt2pt discovery failure: %p %p %p\n",
+ save_iocb, link_mbox, login_mbox);
+ if (save_iocb)
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ if (link_mbox)
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ if (login_mbox)
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
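The ordering comment earlier in this hunk (CONFIG_LINK mbox, then its completion, then PLOGI ACC, then REG_LOGIN) is the heart of the fix. A hedged, synchronous stand-in for that completion chaining, with invented names:

#include <stdio.h>

struct mbox {
	const char *cmd;
	void (*cmpl)(struct mbox *);	/* runs when the command finishes */
	void *ctx;			/* next stage, carried like context3 */
};

static void issue_mbox(struct mbox *m)
{
	printf("issue %s\n", m->cmd);
	if (m->cmpl)
		m->cmpl(m);	/* the real driver invokes this asynchronously */
}

static void reg_login_cmpl(struct mbox *m)
{
	printf("%s complete\n", m->cmd);
}

static void config_link_cmpl(struct mbox *m)
{
	printf("%s complete, send PLOGI ACC, then REG_LOGIN\n", m->cmd);
	issue_mbox(m->ctx);	/* REG_LOGIN only after CONFIG_LINK completes */
}

int main(void)
{
	struct mbox reg_login = { "REG_LOGIN", reg_login_cmpl, NULL };
	struct mbox config_link = { "CONFIG_LINK", config_link_cmpl, &reg_login };

	issue_mbox(&config_link);
	return 0;
}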
@@ -2030,7 +2121,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
- if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
+ bf_get_be32(prli_conf, nvpr))
ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
else
ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a227e36cbdc2..db4a04a207ec 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -196,6 +196,46 @@ lpfc_nvme_cmd_template(void)
}
/**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the IO associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
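The new helper is built almost entirely from bf_set() field writes into the 128-byte WQE. For readers unfamiliar with that macro family, here is a hedged userspace analogue of the shift/mask packing it performs; the macro names and field layout below are invented, not the lpfc definitions:

#include <stdint.h>
#include <stdio.h>

#define FLD_SET(w, sh, mask, v) \
	((w) = ((w) & ~((uint32_t)(mask) << (sh))) | \
	       (((uint32_t)(v) & (mask)) << (sh)))
#define FLD_GET(w, sh, mask)	(((w) >> (sh)) & (mask))

int main(void)
{
	uint32_t word = 0;

	FLD_SET(word, 0, 0xff, 0x81);	/* e.g. a command code field */
	FLD_SET(word, 8, 0x1, 1);	/* e.g. an inhibit-ABTS bit */
	printf("word=0x%08x cmd=0x%02x ia=%u\n",
	       (unsigned)word, (unsigned)FLD_GET(word, 0, 0xff),
	       (unsigned)FLD_GET(word, 8, 0x1));
	return 0;
}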
+
+/**
* lpfc_nvme_create_queue -
* @lpfc_pnvme: Pointer to the driver's nvme instance data
* @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
@@ -1791,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
- union lpfc_wqe128 *abts_wqe;
unsigned long flags;
int ret_val;
@@ -1912,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Ready - mark outstanding as aborted by driver. */
nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* Complete prepping the abort wqe and issue to the FW. */
- abts_wqe = &abts_buf->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
- nvmereq_wqe->iocb.ulpClass);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_buf->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_buf->iocb_flag |= LPFC_IO_NVME;
@@ -2084,7 +2093,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
qp = lpfc_ncmd->hdwq;
- if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+ if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
@@ -2139,12 +2148,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- /* Advertise how many hw queues we support based on fcp_io_sched */
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
- else
- lpfc_nvme_template.max_hw_queues =
- phba->sli4_hba.num_present_cpu;
+ /* Advertise how many hw queues we support based on cfg_hdw_queue,
+ * which will not exceed cpu count.
+ */
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9884228800a5..9dc9afe1c255 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->txrdy) {
- dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
- ctxp->txrdy_phys);
- ctxp->txrdy = NULL;
- ctxp->txrdy_phys = 0;
- }
-
if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
@@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -1958,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- if (!phba->targetport) {
+ if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO x%x\n", oxid);
+ "6154 LS Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
@@ -1971,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
}
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
@@ -2326,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -2401,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
d_buf = piocb->context2;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3015 LS Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
@@ -2429,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3167 NVMET FCP Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
@@ -2595,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct scatterlist *sgel;
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
- uint32_t *txrdy;
dma_addr_t physaddr;
int i, cnt;
int do_pbde;
@@ -2757,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
&lpfc_treceive_cmd_template.words[3],
sizeof(uint32_t) * 9);
- /* Words 0 - 2 : The first sg segment */
- txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
- GFP_KERNEL, &physaddr);
- if (!txrdy) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6041 Bad txrdy buffer: oxid x%x\n",
- ctxp->oxid);
- return NULL;
- }
- ctxp->txrdy = txrdy;
- ctxp->txrdy_phys = physaddr;
- wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
- wqe->fcp_treceive.bde.addrLow =
- cpu_to_le32(putPaddrLow(physaddr));
- wqe->fcp_treceive.bde.addrHigh =
- cpu_to_le32(putPaddrHigh(physaddr));
+ /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
+ wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
+ wqe->fcp_treceive.bde.addrLow = 0;
+ wqe->fcp_treceive.bde.addrHigh = 0;
/* Word 4 */
wqe->fcp_treceive.relative_offset = ctxp->offset;
@@ -2808,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
- /* Setup 1 TXRDY and 1 SKIP SGE */
- txrdy[0] = 0;
- txrdy[1] = cpu_to_be32(rsp->transfer_length);
- txrdy[2] = 0;
-
- sgl->addr_hi = putPaddrHigh(physaddr);
- sgl->addr_lo = putPaddrLow(physaddr);
+ /* Setup 2 SKIP SGEs */
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
sgl->word2 = 0;
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+ sgl->sge_len = 0;
sgl++;
sgl->addr_hi = 0;
sgl->addr_lo = 0;
@@ -3239,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
- union lpfc_wqe128 *abts_wqe;
struct lpfc_nodelist *ndlp;
unsigned long flags;
+ u8 opt;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3280,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- abts_wqe = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;
+ opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3327,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
-
- /* word 3 */
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_wqeq->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
@@ -3495,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 8ff67deac10a..b80b1639b9a7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx {
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
- dma_addr_t txrdy_phys;
spinlock_t ctxlock; /* protect flag access */
- uint32_t *txrdy;
uint32_t sid;
uint32_t offset;
uint16_t oxid;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6822cd9ff8f1..b138d9fee675 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -134,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
/**
* lpfc_update_stats - Update statistical data for the command completion
- * @phba: Pointer to HBA object.
+ * @vport: The virtual port on which this call is executing.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called when there is a command completion and this
* function updates the statistical data for the command completion.
**/
static void
-lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
- struct Scsi_Host *shost = cmd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long latency;
int i;
@@ -526,7 +526,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
&qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
@@ -566,7 +566,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
if (iocbq->sli4_xritag != xri)
continue;
psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
@@ -786,7 +786,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
psb->prot_seg_cnt = 0;
qp = psb->hdwq;
- if (psb->exch_busy) {
+ if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
@@ -3812,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd) {
+ if (!cmd || !phba) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
@@ -3824,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
@@ -3835,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exhange busy status from HBA */
- lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -3869,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
- if (lpfc_cmd->status) {
+ if (unlikely(lpfc_cmd->status)) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
@@ -4002,7 +4005,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(phba, lpfc_cmd);
+ lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4610,17 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err == 2) {
- cmnd->result = DID_ERROR << 16;
- goto out_fail_command_release_buf;
- } else if (err) {
+ if (unlikely(err)) {
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ }
goto out_host_busy_free_buf;
}
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
@@ -4843,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
}
- /* no longer need the lock after this point */
- spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ /* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 614f78dddafe..c82b5792da98 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
}
static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
- struct lpfc_eqe *eqe;
- uint32_t count = 0;
+ struct lpfc_eqe *eqe = NULL;
+ u32 eq_count = 0, cq_count = 0;
+ struct lpfc_cqe *cqe = NULL;
+ struct lpfc_queue *cq = NULL, *childq = NULL;
+ int cqid = 0;
/* walk all the EQ entries and drop on the floor */
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+ cq = NULL;
+
+ list_for_each_entry(childq, &eq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ /* If CQ is valid, iterate through it and drop all the CQEs */
+ if (cq) {
+ cqe = lpfc_sli4_cq_get(cq);
+ while (cqe) {
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
+ cq_count++;
+ cqe = lpfc_sli4_cq_get(cq);
+ }
+ /* Clear and re-arm the CQ */
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
+ LPFC_QUEUE_REARM);
+ cq_count = 0;
+ }
__lpfc_sli4_consume_eqe(phba, eq, eqe);
- count++;
+ eq_count++;
eqe = lpfc_sli4_eq_get(eq);
}
/* Clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint8_t rearm)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
eq->queue_claimed = 0;
rearm_and_exit:
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+ /* Always clear the EQ. */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
return count;
}
@@ -2526,6 +2557,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+ if (vport->load_flag & FC_UNLOADING)
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2672,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x/x%x) Cmpl\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2693,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"(x%x/x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2701,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
- pmb->vport->port_state);
+ pmb->vport ? pmb->vport->port_state :
+ LPFC_VPORT_UNKNOWN);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -6167,6 +6203,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_DUAL_DUMP:
+ bf_set(lpfc_mbx_set_feature_dd,
+ &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
+ bf_set(lpfc_mbx_set_feature_ddquery,
+ &mbox->u.mqe.un.set_feature, 0);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ break;
}
return;
@@ -6184,11 +6228,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Wait 10ms for firmware to stop using DMA buffer */
+ usleep_range(10 * 1000, 20 * 1000);
}
/**
@@ -6224,7 +6273,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
}
/**
@@ -6326,7 +6377,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- ras_fwlog->ras_active = true;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
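These hunks replace the old boolean ras_active with a three-state field (INACTIVE, REG_INPROGRESS, ACTIVE) that is only written under phba->hbalock. A minimal userspace analogue of the pattern, with a pthread mutex standing in for the hba lock and invented type names:

#include <pthread.h>

enum fwlog_state { FWLOG_INACTIVE, FWLOG_REG_INPROGRESS, FWLOG_ACTIVE };

struct fwlog {
	pthread_mutex_t lock;
	enum fwlog_state state;
};

static void fwlog_set_state(struct fwlog *f, enum fwlog_state s)
{
	pthread_mutex_lock(&f->lock);	/* plays the role of phba->hbalock */
	f->state = s;
	pthread_mutex_unlock(&f->lock);
}

int main(void)
{
	struct fwlog f = { PTHREAD_MUTEX_INITIALIZER, FWLOG_INACTIVE };

	fwlog_set_state(&f, FWLOG_REG_INPROGRESS);
	fwlog_set_state(&f, FWLOG_ACTIVE);
	fwlog_set_state(&f, FWLOG_INACTIVE);
	return 0;
}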
@@ -6358,6 +6411,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
@@ -6417,6 +6474,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = REG_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -7148,7 +7208,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i, cnt, len;
+ int rc, i, cnt, len, dd;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -7399,6 +7459,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /* Always try to enable dual dump feature if we can */
+ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
+ if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+ "6448 Dual Dump is enabled\n");
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
+ "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(
+ phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(
+ phba, mboxq),
+ rc, dd);
/*
* Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
* calls depends on these resources to complete port setup.
@@ -7523,9 +7600,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.nvmet_xri_cnt = rc;
- cnt = phba->cfg_iocb_cnt * 1024;
- /* We need 1 iocbq for every SGL, for IO processing */
- cnt += phba->sli4_hba.nvmet_xri_cnt;
+ /* We allocate an iocbq for every receive context SGL.
+ * The additional allocation is for abort and ls handling.
+ */
+ cnt = phba->sli4_hba.nvmet_xri_cnt +
+ phba->sli4_hba.max_cfg_param.max_xri;
} else {
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_io_sgl_update(phba);
@@ -7547,14 +7626,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_destroy_queue;
}
- cnt = phba->cfg_iocb_cnt * 1024;
+ /* Each lpfc_io_buf job structure has an iocbq element.
+ * This cnt provides for abort, els, ct and ls requests.
+ */
+ cnt = phba->sli4_hba.max_cfg_param.max_xri;
}
if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
+ "2821 initialize iocb list with %d entries\n",
+ cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7892,7 +7974,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -8964,7 +9046,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
- * This function is called with hbalock held to add a command
+ * This function is called with hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports to add a command
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
@@ -8972,7 +9055,10 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lockdep_assert_held(&pring->ring_lock);
+ else
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
@@ -9863,7 +9949,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-4 interface spec.
*
- * This function is called with hbalock held. The function will return success
+ * This function is called with ringlock held. The function will return success
* after it successfully submit the iocb to firmware or after adding to the
* txq.
**/
@@ -10053,10 +10139,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *eq;
unsigned long iflags;
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+
pring = lpfc_sli4_calc_ring(phba, piocb);
if (unlikely(pring == NULL))
return IOCB_ERROR;
@@ -10064,6 +10153,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10678,14 +10769,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
prev_pring_flag = pring->flag;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txq, list) {
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
}
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txcmplq, list) {
if (iocb->vport != vport)
@@ -11050,9 +11141,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4]);
spin_unlock_irq(&phba->hbalock);
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
- lpfc_sli_release_iocbq(phba, abort_iocb);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11736,7 +11824,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
}
pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13158,13 +13249,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -13217,7 +13314,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_sli_ring *pring = cq->pring;
int txq_cnt = 0;
int txcmplq_cnt = 0;
- int fcp_txcmplq_cnt = 0;
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
@@ -13239,9 +13335,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
- "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ "els_txcmplq_cnt=%d\n",
txq_cnt, phba->iocb_cnt,
- fcp_txcmplq_cnt,
txcmplq_cnt);
return false;
}
@@ -13592,6 +13687,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
LPFC_QUEUE_NOARM);
consumed = 0;
+ cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
}
if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -14220,7 +14316,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, fpeq);
+ lpfc_sli4_eqcq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -14230,14 +14326,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
- phba->cfg_irq_chann == 1 &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
/* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq);
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
@@ -14297,6 +14393,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_queue *eq;
+ int i = 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ if (!list_empty(&phba->poll_list))
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+
+ rcu_read_unlock();
+}
+
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+ struct lpfc_hba *phba = eq->phba;
+ int i = 0;
+
+ /*
+ * Unlocking an irq is one of the entry points to check
+ * for re-schedule, but we are good for the io submission
+ * path as the midlayer does a get_cpu to glue us in. Flush
+ * out the invalidation queue so we can see the updated
+ * value for the flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+ /* We will not likely get the completion for the caller
+ * during this iteration, but that's fine.
+ * Future io's coming on this eq should be able to
+ * pick it up. As for the case of single io's, they
+ * will be handled through a sched from the polling timer
+ * function, which is currently triggered every 1msec.
+ */
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+
+ return i;
+}
+
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ if (list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ /* kickstart slowpath processing for this eq */
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /* Disable slowpath processing for this eq. Kick-start the eq
+ * by re-arming it ASAP.
+ */
+ list_del_rcu(&eq->_poll_list);
+ synchronize_rcu();
+
+ if (list_empty(&phba->poll_list))
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *next;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+ list_del(&eq->_poll_list);
+
+ INIT_LIST_HEAD(&phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+ if (mode == eq->mode)
+ return;
+ /*
+ * Currently this function is only called during a hotplug
+ * event and the cpu on which this function is executing
+ * is going offline. By now the hotplug has instructed
+ * the scheduler to remove this cpu from the cpu active mask.
+ * So we don't need to worry about being put aside by the
+ * scheduler for a high priority process. Yes, interrupts
+ * could still arrive, but they are known to retire ASAP.
+ */
+
+ /* Publish the new mode to the fastpath */
+ WRITE_ONCE(eq->mode, mode);
+ /* flush out the store buffer */
+ smp_wmb();
+
+ /*
+ * Add this eq to (or remove it from) the polling list. For
+ * a grace period both the interrupt handler and the poller
+ * will try to process the eq _but_ that's fine. We have a
+ * synchronization mechanism in place (queue_claimed) to
+ * deal with it. This is just a draining phase for the
+ * interrupt handler (not the eq's) as we have guaranteed
+ * through the barrier that all the CPUs have seen the new
+ * CQ_POLLED state, which effectively disables the REARMING
+ * of the EQ. The whole idea is that the eq's die off
+ * eventually as we are not rearming them anymore.
+ */
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
+ lpfc_sli4_remove_from_poll_list(eq);
+}
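The WRITE_ONCE plus smp_wmb() in the mode switch above pairs with the smp_rmb() plus READ_ONCE in lpfc_sli4_poll_eq(). A hedged userspace analogue of that publish/observe pairing, expressed with C11 release/acquire atomics (a rough mapping, not a claim about the kernel primitives' exact semantics):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int eq_mode;	/* 0 = interrupt driven, 1 = polled */

static void switch_to_poll(void)
{
	/* Publish the new mode so every reader sees it before polling. */
	atomic_store_explicit(&eq_mode, 1, memory_order_release);
}

static int poll_eq(void)
{
	/* Observe the latest published mode before deciding to poll. */
	if (atomic_load_explicit(&eq_mode, memory_order_acquire) == 1)
		return 1;	/* process the EQ without re-arming it */
	return 0;
}

int main(void)
{
	switch_to_poll();
	printf("polled=%d\n", poll_eq());
	return 0;
}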
+
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+
+ /* Kick start the pending io's in h/w.
+ * Once we switch back to interrupt processing on an eq,
+ * the io completion path will only arm the eq when it
+ * receives a completion. But since the eq is in the
+ * disarmed state, it never receives a completion. This
+ * creates a deadlock scenario.
+ */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
+
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
@@ -14371,6 +14608,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
return NULL;
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->_poll_list);
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
@@ -18124,8 +18362,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0001 rpi:%x max:%x lim:%x\n",
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
(int) rpi, max_rpi, rpi_limit);
/*
@@ -18181,11 +18420,19 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
} else {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2016 rpi %x not inuse\n",
rpi);
}
@@ -19683,6 +19930,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19703,6 +19952,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19731,6 +19982,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
return WQE_ERROR;
@@ -20093,6 +20346,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+
if (phba->cfg_xri_rebalancing) {
if (lpfc_ncmd->expedite) {
/* Return to expedite pool */
@@ -20157,13 +20417,6 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflag);
}
-
- if (phba->cfg_xpsgl && !phba->nvmet_support &&
- !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
- lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
-
- if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
- lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
}
/**
@@ -20399,8 +20652,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *allocated_sgl = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the sgl_list */
@@ -20412,9 +20666,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8353 error kmalloc memory for HDWQ "
@@ -20434,7 +20688,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
return NULL;
}
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
}
@@ -20442,7 +20696,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_sgl;
}
@@ -20466,8 +20720,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20480,7 +20735,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20501,8 +20756,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->sgl_list;
struct sli4_hybrid_sgl *list_entry = NULL;
struct sli4_hybrid_sgl *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free sgl pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20514,7 +20770,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
@@ -20538,8 +20794,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *allocated_buf = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the list */
@@ -20552,9 +20809,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8355 error kmalloc memory for HDWQ "
@@ -20579,7 +20836,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
sizeof(struct fcp_cmnd));
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
}
@@ -20587,7 +20844,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_buf;
}
@@ -20612,8 +20869,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20626,7 +20884,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20647,8 +20905,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
struct fcp_cmd_rsp_buf *list_entry = NULL;
struct fcp_cmd_rsp_buf *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free cmd_rsp buf pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20661,5 +20920,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
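The string of hdwq_lock conversions above swaps spin_lock_irq()/spin_unlock_irq() for the irqsave variants. The distinction matters because spin_unlock_irq() unconditionally re-enables interrupts, while irqrestore puts back whatever state the caller had, so these paths become safe to call with interrupts already disabled. A sketch of the pattern (illustrative, not lpfc code):

#include <linux/spinlock.h>

static void touch_hdwq_lists(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* disable IRQs, save prior state */
	/* ... modify the per-hdwq sgl/cmd-rsp lists ... */
	spin_unlock_irqrestore(lock, flags);	/* restore the saved state */
}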
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 37fbcb46387e..7bcf922a8be2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -384,14 +384,13 @@ struct lpfc_io_buf {
struct lpfc_nodelist *ndlp;
uint32_t timeout;
- uint16_t flags; /* TBD convert exch_busy to flags */
+ uint16_t flags;
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0d4882a9e634..d963ca871383 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -41,8 +41,13 @@
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_HDWQ_MIN 0
-#define LPFC_HBA_HDWQ_MAX 128
-#define LPFC_HBA_HDWQ_DEF 0
+#define LPFC_HBA_HDWQ_MAX 256
+#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN
+
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN 0
+#define LPFC_IRQ_CHANN_MAX 256
+#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN
/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
@@ -133,6 +138,23 @@ struct lpfc_rqb {
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
+
+ /*
+ * If interrupts are in effect on _all_ the eq's, the footprint
+ * of the polling code is zero (except for mode). This memory is
+ * checked on every io to see if the io needs to be polled, and
+ * at completion time to check if the eq's need to be rearmed.
+ * Keep it in the same cacheline as the queue ptr to avoid cpu
+ * fetch stalls. Using 1B of memory leaves a 7B hole; fill it
+ * with other frequently used members.
+ */
+ uint16_t last_cpu; /* most recent cpu */
+ uint16_t hdwq;
+ uint8_t qe_valid;
+ uint8_t mode; /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT 0
+#define LPFC_EQ_POLL 1
+
struct list_head wqfull_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
@@ -199,6 +221,7 @@ struct lpfc_queue {
uint8_t q_flag;
#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */
#define LPFC_NVMET_CQ_NOTIFY 4
void __iomem *db_regaddr;
uint16_t dpp_enable;
@@ -239,10 +262,8 @@ struct lpfc_queue {
struct delayed_work sched_spwork;
uint64_t isr_timestamp;
- uint16_t hdwq;
- uint16_t last_cpu; /* most recent cpu */
- uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
+ struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
};
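The struct comment above is a cacheline argument: the 1-byte mode flag is read on every I/O, so it is grouped with the queue pointer and the other hot 1-2 byte members rather than left where padding would push it onto another line. An illustrative userspace analogue of that layout idea, with invented field names:

#include <stdint.h>
#include <stdio.h>

struct hot_queue {
	_Alignas(64) void *q_pgs;	/* pointer touched on every I/O */
	uint16_t last_cpu;
	uint16_t hdwq;
	uint8_t qe_valid;
	uint8_t mode;			/* the 1-byte poll/interrupt flag */
	/* 8 + 2 + 2 + 1 + 1 = 14 bytes: everything hot shares one line */
};

int main(void)
{
	printf("sizeof=%zu align=%zu\n",
	       sizeof(struct hot_queue), _Alignof(struct hot_queue));
	return 0;
}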
@@ -451,11 +472,17 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ uint16_t irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
+ struct cpumask aff_mask;
};
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
+
/*BB Credit recovery value*/
struct lpfc_bbscn_params {
uint32_t word0;
@@ -513,6 +540,7 @@ struct lpfc_pc_sli4_params {
uint8_t cqav;
uint8_t wqsize;
uint8_t bv1s;
+ uint8_t pls;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -544,11 +572,10 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
- uint16_t irq;
uint16_t eq;
uint16_t hdwq;
uint16_t flag;
@@ -891,6 +918,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
+ struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
uint32_t conf_trunk;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b8aae31ffda3..9e5ff58edaca 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.4.0.0"
+#define LPFC_DRIVER_VERSION "12.6.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 9c5566217ef6..b5dde9d0d054 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6e788c02ff4..bd8184072bed 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -24,6 +24,8 @@
#define MEGASAS_VERSION "07.710.50.00-rc1"
#define MEGASAS_RELDATE "June 28, 2019"
+#define MEGASAS_MSIX_NAME_LEN 32
+
/*
* Device IDs
*/
@@ -2203,6 +2205,7 @@ struct megasas_aen_event {
};
struct megasas_irq_context {
+ char name[MEGASAS_MSIX_NAME_LEN];
struct megasas_instance *instance;
u32 MSIxIndex;
u32 os_irq;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 42cf38c1ea99..a4bc81479284 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -199,7 +199,7 @@ static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;
/* define lock for aen poll */
-spinlock_t poll_aen_lock;
+static spinlock_t poll_aen_lock;
extern struct dentry *megasas_debugfs_root;
extern void megasas_init_debugfs(void);
@@ -5546,9 +5546,11 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
+ snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
+ "megasas", instance->host->host_no);
if (request_irq(pci_irq_vector(pdev, 0),
instance->instancet->service_isr, IRQF_SHARED,
- "megasas", &instance->irq_context[0])) {
+ instance->irq_context->name, &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -5580,8 +5582,10 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
+ snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
+ "megasas", instance->host->host_no, i);
if (request_irq(pci_irq_vector(pdev, i),
- instance->instancet->service_isr, 0, "megasas",
+ instance->instancet->service_isr, 0, instance->irq_context[i].name,
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 50b8c1b12767..89c3685f5163 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -386,9 +386,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
- u64 blk, debugBlk;
+ u64 blk;
blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
- debugBlk = blk;
blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
*span_blk = blk;
@@ -699,9 +698,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
u32 logArm, rowMod, armQ, arm;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
/*Get row and span from io_info for Uneven Span IO.*/
@@ -801,9 +798,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u64 *pdBlock = &io_info->pdBlock;
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
row = mega_div64_32(stripRow, raid->rowDataSize);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index fea3cb6a090b..848fbec7bda6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3044,11 +3044,11 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
descp = NULL;
ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->msix_vector_count);
+ ioc->reply_queue_count);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->msix_vector_count, irq_flags, descp);
+ ioc->reply_queue_count, irq_flags, descp);
return i;
}
@@ -4242,10 +4242,12 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
- Mpi2FWImageHeader_t *FWImgHdr;
+ Mpi2FWImageHeader_t *fw_img_hdr;
+ Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
int r = 0;
+ u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
u16 smid, ioc_status;
@@ -4302,14 +4304,26 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
- if (FWImgHdr->PackageVersion.Word) {
- ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
- }
+ fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
+ if (le32_to_cpu(fw_img_hdr->Signature) ==
+ MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
+ cmp_img_hdr =
+ (Mpi26ComponentImageHeader_t *)
+ (fwpkg_data);
+ package_version =
+ le32_to_cpu(
+ cmp_img_hdr->ApplicationSpecific);
+ } else
+ package_version =
+ le32_to_cpu(
+ fw_img_hdr->PackageVersion.Word);
+ if (package_version)
+ ioc_info(ioc,
+ "FW Package Ver(%02d.%02d.%02d.%02d)\n",
+ ((package_version) & 0xFF000000) >> 24,
+ ((package_version) & 0x00FF0000) >> 16,
+ ((package_version) & 0x0000FF00) >> 8,
+ (package_version) & 0x000000FF);
} else {
_debug_dump_mf(&mpi_reply,
sizeof(Mpi2FWUploadReply_t)/4);
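
The rewritten block normalizes both header layouts (the MPI 2.6 component image header and the legacy package header) into a single little-endian 32-bit package_version, then prints it byte by byte. A self-contained sketch of that mask-and-shift unpacking; the sample value is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t package_version = 0x0c010203; /* made-up sample: 12.01.02.03 */

    printf("FW Package Ver(%02u.%02u.%02u.%02u)\n",
           (unsigned)((package_version & 0xFF000000) >> 24), /* Major */
           (unsigned)((package_version & 0x00FF0000) >> 16), /* Minor */
           (unsigned)((package_version & 0x0000FF00) >> 8),  /* Unit  */
           (unsigned)(package_version & 0x000000FF));        /* Dev   */
    return 0;
}
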
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index faca0a5e71f8..4ebf81ea4d2f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "31.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 31
+#define MPT3SAS_DRIVER_VERSION "32.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 32
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -303,6 +303,8 @@ struct mpt3sas_nvme_cmd {
#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08)
+#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10)
/*
* HP HBA branding
@@ -391,9 +393,12 @@ struct Mpi2ManufacturingPage11_t {
u8 Reserved6; /* 2Fh */
__le32 Reserved7[7]; /* 30h - 4Bh */
u8 NVMeAbortTO; /* 4Ch */
- u8 Reserved8; /* 4Dh */
- u16 Reserved9; /* 4Eh */
- __le32 Reserved10[4]; /* 50h - 60h */
+ u8 NumPerDevEvents; /* 4Dh */
+ u8 HostTraceBufferDecrementSizeKB; /* 4Eh */
+ u8 HostTraceBufferFlags; /* 4Fh */
+ u16 HostTraceBufferMaxSizeKB; /* 50h */
+ u16 HostTraceBufferMinSizeKB; /* 52h */
+ __le32 Reserved10[2]; /* 54h - 5Bh */
};
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7d696952b376..6874cf017739 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -466,6 +466,13 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
+
+ /*
+ * add a log message to indicate the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer due to adapter reset.",
+ __func__);
mpt3sas_send_diag_release(ioc, i, &issue_reset);
}
}
@@ -778,6 +785,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ if (!ioc->pcie_sg_lookup) {
+ dtmprintk(ioc, ioc_info(ioc,
+ "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
+ ));
+
+ if (ioc->logging_level & MPT_DEBUG_TM)
+ _debug_dump_mf(nvme_encap_request,
+ ioc->request_sz/4);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
/*
* Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense
@@ -1484,6 +1503,26 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
return rc;
}
+/**
+ * _ctl_diag_get_bufftype - return diag buffer type
+ * either TRACE, SNAPSHOT, or EXTENDED
+ * @ioc: per adapter object
+ * @unique_id: specifies the unique_id for the buffer
+ *
+ * returns MPT3_DIAG_UID_NOT_FOUND if the id is not found
+ */
+static u8
+_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
+{
+ u8 index;
+
+ for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
+ if (ioc->unique_id[index] == unique_id)
+ return index;
+ }
+
+ return MPT3_DIAG_UID_NOT_FOUND;
+}
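
The new helper replaces the old convention of encoding the buffer type into the low byte of unique_id (the "karg.unique_id & 0x000000ff" lines removed elsewhere in this patch) with an explicit table lookup that returns a sentinel. A userspace model of the same lookup, with the sentinel value taken from the mpt3sas_ctl.h hunk later in this patch:

#include <stdio.h>
#include <stdint.h>

#define MPI2_DIAG_BUF_TYPE_COUNT  3     /* TRACE, SNAPSHOT, EXTENDED */
#define MPT3_DIAG_UID_NOT_FOUND   0xFF  /* sentinel, from mpt3sas_ctl.h */

/* standalone model of _ctl_diag_get_bufftype(): map a user-supplied
 * unique_id back to a buffer-type index, or return the sentinel */
static uint8_t get_bufftype(const uint32_t *unique_id, uint32_t uid)
{
    uint8_t index;

    for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++)
        if (unique_id[index] == uid)
            return index;
    return MPT3_DIAG_UID_NOT_FOUND;
}

int main(void)
{
    /* slot 0 holds the default trace-buffer UID (MPT3DIAGBUFFUNIQUEID) */
    uint32_t unique_id[MPI2_DIAG_BUF_TYPE_COUNT] = { 0x4252434D, 0, 0 };

    printf("0x4252434d -> %u\n",
           (unsigned)get_bufftype(unique_id, 0x4252434D));
    printf("0xdeadbeef -> 0x%02X\n",
           (unsigned)get_bufftype(unique_id, 0xdeadbeef));
    return 0;
}
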
/**
* _ctl_diag_register_2 - wrapper for registering diag buffer support
@@ -1531,11 +1570,88 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
return -EPERM;
}
+ if (diag_register->unique_id == 0) {
+ ioc_err(ioc,
+ "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
+ diag_register->unique_id, buffer_type);
+ return -EINVAL;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
+ __func__, buffer_type, ioc->unique_id[buffer_type]);
+ return -EINVAL;
+ }
+
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- __func__, buffer_type);
- return -EINVAL;
+ /*
+ * If the driver posted the buffer initially and an application
+ * now wants to Register (own) that buffer without Releasing it
+ * first, the application's Register command MUST carry the same
+ * buffer type and size as reported by the Query command;
+ * otherwise the Register command fails. If the application has
+ * released the buffer and wants to re-register it, that is
+ * allowed as long as the Unique-Id and size match.
+ */
+
+ if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
+ ioc->diag_buffer_sz[buffer_type] ==
+ diag_register->requested_buffer_size) {
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ dctlprintk(ioc, ioc_info(ioc,
+ "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
+ __func__, buffer_type,
+ ioc->unique_id[buffer_type],
+ diag_register->unique_id));
+
+ /*
+ * Application wants to own the buffer with
+ * the same size.
+ */
+ ioc->unique_id[buffer_type] =
+ diag_register->unique_id;
+ rc = 0; /* success */
+ goto out;
+ }
+ } else if (ioc->unique_id[buffer_type] !=
+ MPT3DIAGBUFFUNIQUEID) {
+ if (ioc->unique_id[buffer_type] !=
+ diag_register->unique_id ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size ||
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else {
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+
+ if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size) {
+
+ ioc_err(ioc,
+ "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
+ __func__, buffer_type,
+ ioc->diag_buffer_sz[buffer_type]);
+ return -EINVAL;
+ }
}
if (diag_register->requested_buffer_size % 4) {
@@ -1560,7 +1676,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
request_data = ioc->diag_buffer[buffer_type];
request_data_sz = diag_register->requested_buffer_size;
ioc->unique_id[buffer_type] = diag_register->unique_id;
- ioc->diag_buffer_status[buffer_type] = 0;
+ ioc->diag_buffer_status[buffer_type] &=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
memcpy(ioc->product_specific[buffer_type],
diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
@@ -1584,7 +1701,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
__func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
@@ -1649,9 +1767,12 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
- if (rc && request_data)
+ if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
return rc;
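
After this change diag_buffer_status[] is treated as a small bit-flag state word (REGISTERED, RELEASED, DRIVER_ALLOCATED, APP_OWNED), and the register path above branches on combinations of those bits rather than a single registered/unregistered boolean. A compact sketch of testing and masking such a word, using the flag values from the mpt3sas_base.h hunk above:

#include <stdio.h>

/* flag values copied from the mpt3sas_base.h hunk above */
#define MPT3_DIAG_BUFFER_IS_REGISTERED        (0x01)
#define MPT3_DIAG_BUFFER_IS_RELEASED          (0x02)
#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED  (0x08)
#define MPT3_DIAG_BUFFER_IS_APP_OWNED         (0x10)

int main(void)
{
    unsigned int status = MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED |
                          MPT3_DIAG_BUFFER_IS_REGISTERED;

    /* an application takes ownership of a driver-posted buffer */
    status |= MPT3_DIAG_BUFFER_IS_APP_OWNED;

    /* across a re-register, keep only the allocation bit -- this is
     * what the "&= MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED" line does */
    status &= MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;

    printf("registered=%d app_owned=%d driver_alloc=%d\n",
           !!(status & MPT3_DIAG_BUFFER_IS_REGISTERED),
           !!(status & MPT3_DIAG_BUFFER_IS_APP_OWNED),
           !!(status & MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED));
    return 0;
}
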
@@ -1669,6 +1790,10 @@ void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
struct mpt3_diag_register diag_register;
+ u32 ret_val;
+ u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
+ u32 min_trace_buff_size = 0;
+ u32 decr_trace_buff_size = 0;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
@@ -1677,10 +1802,68 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- /* register for 2MB buffers */
- diag_register.requested_buffer_size = 2 * (1024 * 1024);
- diag_register.unique_id = 0x7075900;
- _ctl_diag_register_2(ioc, &diag_register);
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
+
+ if (trace_buff_size != 0) {
+ diag_register.requested_buffer_size = trace_buff_size;
+ min_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
+ decr_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
+
+ if (min_trace_buff_size > trace_buff_size) {
+ /* The buff size is not set correctly */
+ ioc_err(ioc,
+ "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
+ min_trace_buff_size>>10,
+ trace_buff_size>>10);
+ ioc_err(ioc,
+ "Using zero Min Trace Buff Size\n");
+ min_trace_buff_size = 0;
+ }
+
+ if (decr_trace_buff_size == 0) {
+ /*
+ * No decrement size is configured, so fall
+ * straight back to the minimum size on the
+ * next retry.
+ */
+ decr_trace_buff_size =
+ trace_buff_size - min_trace_buff_size;
+ }
+ } else {
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ }
+
+ do {
+ ret_val = _ctl_diag_register_2(ioc, &diag_register);
+
+ if (ret_val == -ENOMEM && min_trace_buff_size &&
+ (trace_buff_size - decr_trace_buff_size) >=
+ min_trace_buff_size) {
+ /* adjust the buffer size */
+ trace_buff_size -= decr_trace_buff_size;
+ diag_register.requested_buffer_size =
+ trace_buff_size;
+ } else
+ break;
+ } while (true);
+
+ if (ret_val == -ENOMEM)
+ ioc_err(ioc,
+ "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
+ diag_register.requested_buffer_size>>10);
+ else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
+ & MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
}
if (bits_to_register & 2) {
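
The new registration path retries on -ENOMEM, stepping the requested trace-buffer size down by the manufacturing-page decrement until it would drop below the minimum. A userspace model of that back-off loop; try_alloc() is a stand-in for _ctl_diag_register_2() and the KB figures are hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* pretend allocations above 512 KB fail, to exercise the back-off */
static int try_alloc(size_t sz) { return sz > 512 * 1024 ? -1 : 0; }

int main(void)
{
    size_t size = 2048 * 1024;   /* HostTraceBufferMaxSizeKB << 10 */
    size_t min  = 256 * 1024;    /* HostTraceBufferMinSizeKB << 10 */
    size_t decr = 256 * 1024;    /* HostTraceBufferDecrementSizeKB << 10 */
    int rc;

    if (decr == 0)               /* no decrement: jump straight to min */
        decr = size - min;

    do {
        rc = try_alloc(size);
        if (rc && min && size - decr >= min)
            size -= decr;        /* shrink and retry */
        else
            break;
    } while (1);

    printf(rc ? "failed, last tried %zu KB\n" : "allocated %zu KB\n",
           size >> 10);
    return 0;
}
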
@@ -1723,6 +1906,12 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
rc = _ctl_diag_register_2(ioc, &karg);
+
+ if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ ioc->diag_buffer_status[karg.buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_APP_OWNED;
+
return rc;
}
@@ -1752,7 +1941,13 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -1785,12 +1980,21 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- request_data_sz = ioc->diag_buffer_sz[buffer_type];
- request_data_dma = ioc->diag_buffer_dma[buffer_type];
- dma_free_coherent(&ioc->pdev->dev, request_data_sz,
- request_data, request_data_dma);
- ioc->diag_buffer[buffer_type] = NULL;
- ioc->diag_buffer_status[buffer_type] = 0;
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+ ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_REGISTERED;
+ } else {
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ }
return 0;
}
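
Unregister now distinguishes driver-allocated buffers, whose DMA memory is kept for reuse while only the ownership and registration bits are cleared, from application-allocated ones, which are freed outright. A sketch of that branch with malloc/free standing in for dma_free_coherent() and abbreviated flag names:

#include <stdio.h>
#include <stdlib.h>

#define IS_REGISTERED        0x01  /* abbreviated stand-ins for the */
#define IS_DRIVER_ALLOCATED  0x08  /* MPT3_DIAG_BUFFER_* flags above */
#define IS_APP_OWNED         0x10

int main(void)
{
    void *buf = malloc(4096);      /* models the DMA buffer */
    unsigned int status = IS_DRIVER_ALLOCATED | IS_REGISTERED | IS_APP_OWNED;

    if (status & IS_DRIVER_ALLOCATED) {
        /* keep the memory; drop only ownership/registration bits */
        status &= ~IS_APP_OWNED;
        status &= ~IS_REGISTERED;
    } else {
        free(buf);                 /* app-allocated: really release it */
        buf = NULL;
        status = 0;
    }
    printf("buf kept: %s, status=0x%02x\n", buf ? "yes" : "no", status);
    free(buf);
    return 0;
}
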
@@ -1829,14 +2033,17 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EPERM;
}
- if ((ioc->diag_buffer_status[buffer_type] &
- MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
- __func__, buffer_type);
- return -EINVAL;
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
}
- if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
__func__, karg.unique_id);
@@ -1851,13 +2058,21 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID);
- else
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID |
- MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
+ karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED))
+ karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
karg.product_specific[i] =
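
The query path now composes application_flags additively from the individual status bits instead of choosing between two fixed combinations, so the new DYNAMIC_BUFFER_ALLOC and APP_OWNED bits are reported independently. A sketch of the same flag derivation with abbreviated names:

#include <stdio.h>

#define IS_REGISTERED        0x01   /* buffer status bits (abbreviated) */
#define IS_RELEASED          0x02
#define IS_DRIVER_ALLOCATED  0x08
#define IS_APP_OWNED         0x10

#define APP_OWNED            0x0001 /* application_flags bits */
#define BUFFER_VALID         0x0002
#define FW_BUFFER_ACCESS     0x0004
#define DYNAMIC_BUFFER_ALLOC 0x0008

int main(void)
{
    unsigned int status = IS_REGISTERED | IS_APP_OWNED;
    unsigned int flags = 0;

    if (status & IS_REGISTERED)
        flags |= BUFFER_VALID;
    if (!(status & IS_RELEASED))
        flags |= FW_BUFFER_ACCESS;        /* firmware still writing */
    if (!(status & IS_DRIVER_ALLOCATED))
        flags |= DYNAMIC_BUFFER_ALLOC;    /* app allocated it itself */
    if (status & IS_APP_OWNED)
        flags |= APP_OWNED;

    printf("application_flags = 0x%04x\n", flags);
    return 0;
}
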
@@ -2002,7 +2217,13 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2026,7 +2247,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED) {
ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
__func__, buffer_type);
- return 0;
+ return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
@@ -2086,7 +2307,13 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2210,6 +2437,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_RELEASED;
dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
@@ -3130,10 +3359,49 @@ host_trace_buffer_enable_store(struct device *cdev,
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- diag_register.requested_buffer_size = (1024 * 1024);
- diag_register.unique_id = 0x7075900;
+
+ if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+ /* post the same buffer allocated previously */
+ diag_register.requested_buffer_size =
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+ } else {
+ /*
+ * Free the diag buffer memory which was previously
+ * allocated by an application.
+ */
+ if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
+ &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[
+ MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[
+ MPI2_DIAG_BUF_TYPE_TRACE]);
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
+ NULL;
+ }
+
+ diag_register.requested_buffer_size = (1024 * 1024);
+ }
+
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
_ctl_diag_register_2(ioc, &diag_register);
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_info(ioc,
+ "Trace buffer %d KB allocated through sysfs\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
} else if (!strcmp(str, "release")) {
/* exit out if host buffers are already released */
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
@@ -3702,12 +3970,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!ioc->diag_buffer[i])
continue;
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
dma_free_coherent(&ioc->pdev->dev,
ioc->diag_buffer_sz[i],
ioc->diag_buffer[i],
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 18b46faef6f1..0f7aa4ddade0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -95,6 +95,14 @@
#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
struct mpt3_diag_read_buffer)
+/* Trace Buffer default UniqueId */
+#define MPT2DIAGBUFFUNIQUEID (0x07075900)
+#define MPT3DIAGBUFFUNIQUEID (0x4252434D)
+
+/* UID not found */
+#define MPT3_DIAG_UID_NOT_FOUND (0xFF)
+
+
/**
* struct mpt3_ioctl_header - main header structure
* @ioc_number - IOC unit number
@@ -310,6 +318,7 @@ struct mpt3_ioctl_btdh_mapping {
#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008)
/* flags for mpt3_diag_read_buffer */
#define MPT3_FLAGS_REREGISTER (0x0001)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c8e512ba6d39..a038be8a0e90 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5161,7 +5161,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -10193,6 +10193,8 @@ scsih_scan_start(struct Scsi_Host *shost)
int rc;
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+ mpt3sas_enable_diag_buffer(ioc, 1);
if (disable_discovery > 0)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index 6ac453fd5937..8ec9bab20ec4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -113,15 +113,21 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
u8 issue_reset = 0;
+ u32 *trig_data = (u32 *)&event_data->u.master;
dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc,
- ioc_info(ioc, "%s: release trace diag buffer\n",
- __func__));
+ /*
+ * add a log message so that the user knows which event
+ * caused the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n",
+ __func__, event_data->trigger_type,
+ trig_data[0], trig_data[1]);
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 3e0b8ebe257f..a920eced92ec 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1541,7 +1541,7 @@ out:
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc;
struct mvs_tmf_task tmf_task;
tmf_task.tmf = TMF_ABORT_TASK_SET;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e0b427fdf818..11a2cb844ecb 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1722,7 +1722,7 @@ struct ncb {
** Miscellaneous configuration and status parameters.
**----------------------------------------------------------------
*/
- u_char disc; /* Diconnection allowed */
+ u_char disc; /* Disconnection allowed */
u_char scsi_mode; /* Current SCSI BUS mode */
u_char order; /* Tag order to use */
u_char verbose; /* Verbosity for this controller*/
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 70db79254155..b6e04d14292d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1542,7 +1542,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* with ACK reply when below condition is matched:
* MsgIn 00: Command Complete.
* MsgIn 02: Save Data Pointer.
- * MsgIn 04: Diconnect.
+ * MsgIn 04: Disconnect.
* In other case, unexpected BUSFREE is detected.
*/
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 2368f34efba3..dc9b74c9348a 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -32,7 +32,7 @@ config PCMCIA_FDOMAIN
config PCMCIA_NINJA_SCSI
tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
- depends on !64BIT
+ depends on !64BIT || COMPILE_TEST
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 97416e1dcc5b..93616f9fd6d7 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -56,9 +56,7 @@
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
#include "nsp_io.h"
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 6b85016b4db3..7c6be2ec110d 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -70,6 +70,25 @@ static
DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
/**
+ * controller_fatal_error_show - check whether the controller is in a fatal error state
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read only' shost attribute.
+ */
+static ssize_t controller_fatal_error_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->controller_fatal_error);
+}
+static DEVICE_ATTR_RO(controller_fatal_error);
+
+/**
* pm8001_ctl_fw_version_show - firmware version
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -804,6 +823,7 @@ static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev,
+ &dev_attr_controller_fatal_error,
&dev_attr_fw_version,
&dev_attr_update_fw,
&dev_attr_aap_log,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 68a8217032d0..2328ff1349ac 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1186,7 +1186,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
{
s8 bar, logical = 0;
- for (bar = 0; bar < 6; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
@@ -1336,10 +1336,13 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
* @circularQ: the inbound queue we want to transfer to HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
+ * @nb: size in bytes of the command payload
+ * @responseQueue: queue to interrupt on with the command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue)
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
@@ -1350,10 +1353,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
pm8001_printk("No free mpi buffer\n"));
return -ENOMEM;
}
- BUG_ON(!payload);
- /*Copy to the payload*/
- memcpy(pMessage, payload, (pm8001_ha->iomb_size -
- sizeof(struct mpi_msg_hdr)));
+
+ if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
+ nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
+ memcpy(pMessage, payload, nb);
+ if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
+ memset(pMessage + nb, 0, pm8001_ha->iomb_size -
+ (nb + sizeof(struct mpi_msg_hdr)));
/*Build the header*/
Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1364,7 +1370,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
/*Update the PI to the firmware*/
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
responseQueue, opCode, circularQ->producer_idx,
circularQ->consumer_index));
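
Previously pm8001_mpi_build_cmd() copied a fixed IOMB-sized chunk out of every payload, reading past the end of small on-stack request structures; with the new nb argument it clamps the copy and zero-fills the tail so the firmware never sees stack garbage. A userspace sketch of the clamp-and-pad, where IOMB_PAYLOAD stands in for iomb_size minus the message header:

#include <stdio.h>
#include <string.h>

#define IOMB_PAYLOAD 64  /* stand-in for iomb_size - sizeof(mpi_msg_hdr) */

static void build_cmd(void *slot, const void *payload, size_t nb)
{
    if (nb > IOMB_PAYLOAD)
        nb = IOMB_PAYLOAD;           /* never write past the slot */
    memcpy(slot, payload, nb);       /* copy only what the caller owns */
    if (nb < IOMB_PAYLOAD)           /* zero the tail: no stack garbage */
        memset((char *)slot + nb, 0, IOMB_PAYLOAD - nb);
}

int main(void)
{
    struct { unsigned int tag, device_id; } task_abort = { 1, 42 };
    unsigned char slot[IOMB_PAYLOAD];

    build_cmd(slot, &task_abort, sizeof(task_abort));
    printf("copied %zu bytes, padded %zu\n",
           sizeof(task_abort), IOMB_PAYLOAD - sizeof(task_abort));
    return 0;
}
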
@@ -1436,6 +1442,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "outbound opcode msgheader:%x ci=%d pi=%d\n",
+ msgHeader_tmp, circularQ->consumer_idx,
+ circularQ->producer_index));
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1604,7 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
break;
default:
- pm8001_printk("...query task failed!!!\n");
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "...query task failed!!!\n"));
break;
});
@@ -1758,7 +1769,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
if (ret)
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1831,7 +1843,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1890,6 +1903,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2072,7 +2090,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2125,7 +2143,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("port_id = %x,device_id = %x\n",
port_id, dev_id));
switch (event) {
@@ -2263,7 +2281,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2352,6 +2370,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2652,7 +2676,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2723,7 +2747,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, dev_id, tag, event));
switch (event) {
@@ -2872,7 +2896,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2917,9 +2941,13 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t = ccb->task;
ts = &t->task_status;
pm8001_dev = ccb->device;
- if (status)
+ if (status) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("smp IO status 0x%x\n", status));
+ PM8001_IOERR_DBG(pm8001_ha,
+ pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+ }
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
@@ -3070,7 +3098,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -3355,7 +3383,8 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3416,7 +3445,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -3463,7 +3492,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
" phy id = %d\n", port_id, phy_id));
port->port_state = portstate;
@@ -3541,7 +3570,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" phy Down and(default) = %x\n",
portstate));
break;
@@ -3689,7 +3718,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("No matched status = %d\n", status));
break;
}
@@ -3805,8 +3834,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("outbound queue HW event & event type : "));
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+ port_id, phy_id, eventType, status));
switch (eventType) {
case HW_EVENT_PHY_START_STATUS:
PM8001_MSG_DBG(pm8001_ha,
@@ -3990,7 +4020,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type = %x\n", eventType));
break;
}
@@ -4161,7 +4191,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
opc));
break;
@@ -4284,7 +4314,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
@@ -4352,7 +4382,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
return ret;
}
@@ -4461,7 +4492,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
return ret;
}
@@ -4496,7 +4528,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4551,8 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4577,7 +4611,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -4598,7 +4633,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
payload.device_id = cpu_to_le32(device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("unregister device device_id = %d\n", device_id));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4621,7 +4657,8 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4649,6 +4686,9 @@ static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm8001_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm8001_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4672,7 +4712,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag = cpu_to_le32(cmd_tag);
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
return ret;
}
@@ -4729,7 +4770,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_ha->chip_id != chip_8001)
sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
return ret;
}
@@ -4819,7 +4861,8 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4903,7 +4946,8 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4938,7 +4982,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4960,6 +5005,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "dma fw_control context input length :%x\n", fw_control->len));
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -5083,7 +5130,8 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -5108,7 +5156,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3374f553c617..ff618ad80ebd 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -41,6 +41,19 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"
+#include "pm80xx_hwi.h"
+
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+module_param(logging_level, ulong, 0644);
+MODULE_PARM_DESC(logging_level, "Bit mask for enabling logging info.");
+
+static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
+module_param(link_rate, ulong, 0644);
+MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
+ " 1: Link rate 1.5G\n"
+ " 2: Link rate 3.0G\n"
+ " 4: Link rate 6.0G\n"
+ " 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
@@ -401,7 +414,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
pdev = pm8001_ha->pdev;
/* map pci mem (PMC pci base 0-3)*/
- for (bar = 0; bar < 6; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
@@ -432,7 +445,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
- pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+ pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL;
}
logicalBar++;
}
@@ -466,7 +479,15 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- pm8001_ha->logging_level = 0x01;
+ pm8001_ha->logging_level = logging_level;
+ if (link_rate >= 1 && link_rate <= 15)
+ pm8001_ha->link_rate = (link_rate << 8);
+ else {
+ pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
+ LINKRATE_60 | LINKRATE_120;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Setting link rate to default value\n"));
+ }
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
if (pm8001_ha->chip_id != chip_8001)
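
link_rate arrives as a user-settable bitmask of the four supported rates; anything outside 1..15 falls back to the all-rates default, and an accepted mask is shifted into the hardware field. A sketch of that validation, assuming for illustration that the LINKRATE_* masks occupy the low four bits (the driver's real encodings may differ):

#include <stdio.h>

/* assumed low-bit encodings for illustration only */
#define LINKRATE_15   0x01  /* 1.5 Gb */
#define LINKRATE_30   0x02  /* 3.0 Gb */
#define LINKRATE_60   0x04  /* 6.0 Gb */
#define LINKRATE_120  0x08  /* 12.0 Gb */

int main(void)
{
    unsigned long link_rate = 0x5;   /* user asked for 1.5G and 6G */
    unsigned int hw_rate;

    if (link_rate >= 1 && link_rate <= 15)
        hw_rate = (unsigned int)(link_rate << 8); /* into phy-start field */
    else                             /* out of range: use every rate */
        hw_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;

    printf("link_rate field = 0x%x\n", hw_rate);
    return 0;
}
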
@@ -873,7 +894,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
u32 number_of_intr;
int flag = 0;
int rc;
- static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
@@ -894,14 +914,16 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
rc, pm8001_ha->number_of_intr));
for (i = 0; i < number_of_intr; i++) {
- snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
- DRV_NAME"%d", i);
+ snprintf(pm8001_ha->intr_drvname[i],
+ sizeof(pm8001_ha->intr_drvname[0]),
+ "%s-%d", pm8001_ha->name, i);
pm8001_ha->irq_vector[i].irq_id = i;
pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ pm8001_ha->intr_drvname[i],
+ &(pm8001_ha->irq_vector[i]));
if (rc) {
for (j = 0; j < i; j++) {
free_irq(pci_irq_vector(pm8001_ha->pdev, i),
@@ -942,7 +964,7 @@ intx:
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
- DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
+ pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 7e48154e11c3..b7cbc312843e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -119,7 +119,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
- pm8001_printk("memory allocation error\n");
+ pr_err("pm80xx: memory allocation error\n");
return -1;
}
*pphys_addr = mem_dma_handle;
@@ -249,6 +249,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
+ PM8001_DEVIO_DBG(pm8001_ha,
+ pm8001_printk("func 0x%x\n", func));
rc = -EOPNOTSUPP;
}
msleep(300);
@@ -384,8 +386,9 @@ static int pm8001_task_exec(struct sas_task *task,
struct pm8001_port *port = NULL;
struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
unsigned long flags = 0;
+ enum sas_protocol task_proto = t->task_proto;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -410,7 +413,7 @@ static int pm8001_task_exec(struct sas_task *task,
pm8001_dev = dev->lldd_dev;
port = &pm8001_ha->port[sas_find_local_port_id(dev)];
if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(t->task_proto)) {
+ if (sas_protocol_ata(task_proto)) {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
@@ -432,7 +435,7 @@ static int pm8001_task_exec(struct sas_task *task,
goto err_out;
ccb = &pm8001_ha->ccb_info[tag];
- if (!sas_protocol_ata(t->task_proto)) {
+ if (!sas_protocol_ata(task_proto)) {
if (t->num_scatter) {
n_elem = dma_map_sg(pm8001_ha->dev,
t->scatter,
@@ -452,7 +455,7 @@ static int pm8001_task_exec(struct sas_task *task,
ccb->ccb_tag = tag;
ccb->task = t;
ccb->device = pm8001_dev;
- switch (t->task_proto) {
+ switch (task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
break;
@@ -469,8 +472,7 @@ static int pm8001_task_exec(struct sas_task *task,
break;
default:
dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n", task_proto);
rc = -EINVAL;
break;
}
@@ -493,7 +495,7 @@ err_out_tag:
pm8001_tag_free(pm8001_ha, tag);
err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(t->task_proto))
+ if (!sas_protocol_ata(task_proto))
if (n_elem)
dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
@@ -1179,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task)
break;
}
}
- pm8001_printk(":rc= %d\n", rc);
+ pr_err("pm80xx: rc= %d\n", rc);
return rc;
}
@@ -1202,8 +1204,8 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
+ ret = pm8001_find_tag(task, &tag);
+ if (ret == 0) {
pm8001_printk("no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
@@ -1241,26 +1243,50 @@ int pm8001_abort_task(struct sas_task *task)
/* 2. Send Phy Control Hard Reset */
reinit_completion(&completion);
+ phy->port_reset_status = PORT_RESET_TMO;
phy->reset_success = false;
phy->enable_completion = &completion;
phy->reset_completion = &completion_reset;
ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
- if (ret)
- goto out;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
- wait_for_completion(&completion);
- if (!phy->reset_success)
+ if (ret) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
goto out;
+ }
- /* 3. Wait for Port Reset complete / Port reset TMO */
+ /* In the case of the reset timeout/fail we still
+ * abort the command at the firmware. The assumption
+ * here is that the drive is off doing something so
+ * that it's not processing requests, and we want to
+ * avoid getting a completion for this and either
+ * leaking the task in libsas or losing the race and
+ * getting a double free.
+ */
PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ ret = wait_for_completion_timeout(&completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret || !phy->reset_success) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
+ } else {
+ /* 3. Wait for Port Reset complete or
+ * Port reset TMO
+ */
+ PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
- wait_for_completion(&completion_reset);
- if (phy->port_reset_status) {
- pm8001_dev_gone_notify(dev);
- goto out;
+ ret = wait_for_completion_timeout(
+ &completion_reset,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ phy->reset_completion = NULL;
+ WARN_ON(phy->port_reset_status ==
+ PORT_RESET_TMO);
+ if (phy->port_reset_status == PORT_RESET_TMO) {
+ pm8001_dev_gone_notify(dev);
+ goto out;
+ }
}
/*
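
The abort path no longer blocks forever on the phy-control and port-reset completions: on timeout it detaches the completion pointers so a late interrupt cannot write into a stack object that has gone out of scope. A userspace model of the bounded wait, with a POSIX semaphore in place of a kernel completion (build with -pthread; the one-second timeout is a stand-in for PM8001_TASK_TIMEOUT * HZ):

#include <stdio.h>
#include <time.h>
#include <semaphore.h>

int main(void)
{
    sem_t completion;
    sem_t *reset_completion = &completion; /* models phy->reset_completion */
    struct timespec ts;

    sem_init(&completion, 0, 0);
    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += 1;                        /* bounded, not forever */

    /* nobody posts the semaphore, so this times out */
    if (sem_timedwait(reset_completion, &ts) != 0) {
        /* detach so a late waker can't touch the stack object */
        reset_completion = NULL;
        printf("port reset timed out; completion detached\n");
    } else {
        printf("port reset completed\n");
    }
    sem_destroy(&completion);
    return 0;
}
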
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ff17c6aff63d..93438c8f67da 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -66,8 +66,11 @@
#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
-#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
- format, __func__, __LINE__, ## arg)
+#define PM8001_DEV_LOGGING 0x80 /* development message logging */
+#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
+#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \
+ format, pm8001_ha->name, __func__, __LINE__, ## arg)
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
do { \
if (unlikely(HBA->logging_level & LEVEL)) \
@@ -97,6 +100,14 @@ do { \
#define PM8001_MSG_DBG(HBA, CMD) \
PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
+#define PM8001_DEV_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
+
+#define PM8001_DEVIO_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
+
+#define PM8001_IOERR_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
@@ -141,6 +152,8 @@ struct pm8001_ioctl_payload {
#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */
+#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNATURE */
#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
@@ -496,6 +509,7 @@ struct pm8001_hba_info {
u32 forensic_last_offset;
u32 fatal_forensic_shift_offset;
u32 forensic_fatal_step;
+ u32 forensic_preserved_accumulated_transfer;
u32 evtlog_ib_offset;
u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
@@ -530,11 +544,14 @@ struct pm8001_hba_info {
struct pm8001_ccb_info *ccb_info;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
+ char intr_drvname[PM8001_MAX_MSIX_VEC]
+ [PM8001_NAME_LENGTH+1+3+1];
#endif
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
#endif
u32 logging_level;
+ u32 link_rate;
u32 fw_status;
u32 smp_exp_mode;
bool controller_fatal_error;
@@ -663,7 +680,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue);
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 73261902d75d..98dcdbd146d5 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -75,7 +75,7 @@ void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
destination1 = (u32 *)destination;
for (index = 0; index < dw_count; index += 4, destination1++) {
- offset = (soffset + index / 4);
+ offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
*destination1 = cpu_to_le32(value);
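
The loop walks index in steps of 4 bytes per 32-bit word, but the old code divided it by 4 again when forming the register offset, so it advanced only one byte per word and re-read overlapping registers. A userspace model of the corrected arithmetic, with an array standing in for the PCI register window:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t src[4] = { 0x11, 0x22, 0x33, 0x44 }; /* fake register file */
    uint32_t dst[4];
    uint32_t soffset = 0, dw_count = 16;          /* 16 bytes = 4 words */
    uint32_t index;

    for (index = 0; index < dw_count; index += 4) {
        uint32_t offset = soffset + index;        /* was soffset + index/4 */
        dst[index / 4] = src[offset / 4];         /* models read_32(offset) */
    }
    printf("%x %x %x %x\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}
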
@@ -92,9 +92,12 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
u32 accum_len , reg_val, index, *temp;
+ u32 status = 1;
unsigned long start;
u8 *direct_data;
char *fatal_error_data = buf;
+ u32 length_to_read;
+ u32 offset;
pm8001_ha->forensic_info.data_buf.direct_data = buf;
if (pm8001_ha->chip_id == chip_8001) {
@@ -104,16 +107,35 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
- pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ /* Write signature to fatal dump table */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: status1 %d\n", status));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,30 +148,66 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
/* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
- accum_len));
+ /* Determine length of data between previously stored transfer length
+ * and current accumulated transfer length
+ */
+ length_to_read =
+ accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
+ length_to_read));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
+ pm8001_ha->forensic_last_offset));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+
+ /* If the accumulated length failed to read correctly, fail the attempt. */
if (accum_len == 0xFFFFFFFF) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Possible PCI issue 0x%x not expected\n",
- accum_len));
- return -EIO;
+ accum_len));
+ return status;
}
- if (accum_len == 0 || accum_len >= 0x100000) {
+ /* If the accumulated length is zero, fail the attempt */
+ if (accum_len == 0) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
- "%08x ", 0xFFFFFFFF);
+ "%08x ", 0xFFFFFFFF);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* Accumulated length is good, so start capturing the first data */
temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
+ /* If the data left to read is less than SYSFS_OFFSET,
+ * reduce direct_len accordingly
+ */
+ if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
+ > length_to_read) {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ length_to_read -
+ pm8001_ha->forensic_last_offset;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ SYSFS_OFFSET;
+ }
if (pm8001_ha->forensic_info.data_buf.direct_data) {
/* Data is in bar, copy to host memory */
- pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
- pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
- pm8001_ha->forensic_info.data_buf.direct_len ,
- 1);
+ pm80xx_pci_mem_copy(pm8001_ha,
+ pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len, 1);
}
pm8001_ha->fatal_bar_loc +=
pm8001_ha->forensic_info.data_buf.direct_len;
@@ -160,21 +218,29 @@ moreData:
pm8001_ha->forensic_info.data_buf.read_len =
pm8001_ha->forensic_info.data_buf.direct_len;
- if (pm8001_ha->forensic_last_offset >= accum_len) {
+ if (pm8001_ha->forensic_last_offset >= length_to_read) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 3);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_bar_loc = 0;
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -184,12 +250,20 @@ moreData:
sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data
+ += sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -199,63 +273,122 @@ moreData:
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < 256; index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4) ; index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_forensic_shift_offset += 0x100;
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
if (pm8001_ha->forensic_fatal_step == 1) {
- pm8001_ha->fatal_forensic_shift_offset = 0;
- /* Read 64K of the debug data. */
- pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
- pm8001_ha->fatal_forensic_shift_offset);
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ /* Store the previous accumulated length before triggering the next
+ * accumulated-length update
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer =
+ pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+
+ /* Continue capturing the fatal log until the dump status is 0x3 */
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) <
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+
+ /* Reset the FDDSTAT bit by writing zero */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);
+
+ /* set dump control value to '1' so that new data will
+ * be transferred to shared memory
+ */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
MPI_FATAL_EDUMP_HANDSHAKE_RDY);
- /* Poll FDDHSHK until clear */
- start = jiffies + (2 * HZ); /* 2 sec */
+ /* Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
- do {
- reg_val = pm8001_mr32(fatal_table_address,
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
- } while ((reg_val) && time_before(jiffies, start));
+ } while ((reg_val) && time_before(jiffies, start));
- if (reg_val != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
- " = 0x%x\n", reg_val));
- return -EIO;
- }
-
- /* Read the next 64K of the debug data. */
- pm8001_ha->forensic_fatal_step = 0;
- if (pm8001_mr32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_STATUS) !=
- MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
- goto moreData;
- } else {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", 4);
- pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
- pm8001_ha->forensic_info.data_buf.direct_len = 0;
- pm8001_ha->forensic_info.data_buf.direct_offset = 0;
- pm8001_ha->forensic_info.data_buf.read_len = 0;
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return ((char *)
+ pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ }
+ /* Poll status register until set to 2 or
+ * 3 for up to 2 seconds
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+ } while (((reg_val != 2) && (reg_val != 3)) &&
+ time_before(jiffies, start));
+
+ if (reg_val < 2) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0,
+ MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ }
+ /* Read the next block of the debug data. */
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len
+ = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len
+ = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset
+ = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ }
}
}
-
+ offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
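The reworked pm80xx_get_fatal_dump reads the dump incrementally: forensic_preserved_accumulated_transfer remembers how much of the firmware's accumulated length was already consumed, and each pass copies only the delta (length_to_read). A small self-contained sketch of that bookkeeping, with a hypothetical register read standing in for pm8001_mr32(..., MPI_FATAL_EDUMP_TABLE_ACCUM_LEN):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for reading the ACCUM_LEN table entry */
static uint32_t read_accum_len(void)
{
	static uint32_t accum;

	accum += 0x400;		/* firmware keeps appending dump data */
	return accum;
}

int main(void)
{
	uint32_t preserved = 0;	/* forensic_preserved_accumulated_transfer */

	for (int pass = 0; pass < 3; pass++) {
		uint32_t accum_len = read_accum_len();
		uint32_t length_to_read = accum_len - preserved;

		printf("pass %d: accum 0x%x, new bytes 0x%x\n",
		       pass, (unsigned)accum_len, (unsigned)length_to_read);
		preserved = accum_len;	/* mark the delta as consumed */
	}
	return 0;
}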
@@ -317,6 +450,25 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
}
/**
@@ -521,6 +673,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressib, (offsetib + 0x18));
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+ pm8001_ha->inbnd_q_tbl[i].pi_offset));
}
for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
@@ -549,6 +706,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressob, (offsetob + 0x18));
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+ pm8001_ha->outbnd_q_tbl[i].ci_offset));
}
}
@@ -582,6 +744,10 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Updated Fatal error interrupt vector 0x%x\n",
+ pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
@@ -591,6 +757,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Programming DW 0x21 in main cfg table with 0x%x\n",
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -629,6 +798,21 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "CI upper base addr 0x%x CI lower base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
}
/**
@@ -652,6 +836,21 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "PI upper base addr 0x%x PI lower base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
}
/**
@@ -669,9 +868,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
udelay(1);
@@ -797,7 +996,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
- PM8001_INIT_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value));
pcilogic = (value & 0xFC000000) >> 26;
@@ -885,7 +1084,12 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
(THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+ payload.cfg_pg[0], payload.cfg_pg[1]));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
@@ -967,7 +1171,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1090,7 +1295,12 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
KEK_MGMT_SUBOP_KEYCARDUPDATE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Saving Encryption info to flash. payload 0x%x\n",
+ payload.new_curidx_ksop));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1241,7 +1451,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
pm8001_printk("reset register before write : 0x%x\n", regval));
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
- mdelay(500);
+ msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -1443,7 +1653,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Executing abort task end\n"));
if (ret) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1519,7 +1732,9 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1570,6 +1785,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
@@ -1772,7 +1991,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -1826,7 +2045,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -1963,7 +2182,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
@@ -1974,7 +2193,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2062,6 +2281,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (unlikely(status))
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2365,7 +2590,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2382,6 +2607,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -2435,7 +2662,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -2655,6 +2882,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
+ PM8001_DEV_DBG(pm8001_ha,
+ pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+
switch (status) {
case IO_SUCCESS:
@@ -2822,7 +3052,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -2873,7 +3103,8 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -2964,7 +3195,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -2984,7 +3215,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
if (pm8001_ha->flags == PM8001F_RUN_TIME)
- mdelay(200);/*delay a moment to wait disk to spinup*/
+ msleep(200); /* delay a moment to wait for the disk to spin up */
pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
@@ -3013,7 +3244,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port id %d, phy id %d link_rate %d portstate 0x%x\n",
port_id, phy_id, link_rate, portstate));
@@ -3101,7 +3332,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" Phy Down and(default) = 0x%x\n",
portstate));
break;
@@ -3130,8 +3361,10 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (status == 0) {
phy->phy_state = PHY_LINK_DOWN;
if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
+ phy->enable_completion != NULL) {
complete(phy->enable_completion);
+ phy->enable_completion = NULL;
+ }
}
return 0;
@@ -3191,7 +3424,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
port_id, phy_id, eventType, status));
@@ -3376,7 +3609,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type 0x%x\n", eventType));
break;
}
@@ -3758,7 +3991,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
ssp_coalesced_comp_resp(pm8001_ha, piomb);
break;
default:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
break;
}
@@ -3991,8 +4224,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
@@ -4200,7 +4433,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, q_index);
+ &ssp_cmd, sizeof(ssp_cmd), q_index);
return ret;
}
@@ -4441,7 +4674,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, q_index);
+ &sata_cmd, sizeof(sata_cmd), q_index);
return ret;
}
@@ -4465,23 +4698,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
- /*
- ** [0:7] PHY Identifier
- ** [8:11] link rate 1.5G, 3G, 6G
- ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
- ** [14] 0b disable spin up hold; 1b enable spin up hold
- ** [15] ob no change in current PHY analig setup 1b enable using SPAST
- */
- if (!IS_SPCV_12G(pm8001_ha->pdev))
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | phy_id);
- else
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
- phy_id);
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
/* SSC Disable and SAS Analog ST configuration */
/**
payload.ase_sh_lm_slr_phyid =
@@ -4494,9 +4713,10 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4738,8 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4584,7 +4805,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -4614,7 +4836,8 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4641,6 +4864,9 @@ static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm80xx_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4669,7 +4895,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
@@ -4711,7 +4938,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index dc9ab7689060..701951a0f715 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -220,6 +220,9 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
+#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */
+#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */
+
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
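The doorbell wait loop in mpi_init_check calls udelay(1) per iteration, so max_wait_count is effectively a budget in microseconds; the new named constants make the 30 s (SPCv 12G) and 15 s budgets explicit instead of magic products. A toy model of the loop shape (simulated doorbell, no real register access):

#include <stdio.h>

#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* ~30 s at 1 us/step */

/* Hypothetical doorbell: clears after a fixed number of polls */
static unsigned int polls_until_clear = 5;

static int doorbell_still_set(void)
{
	return polls_until_clear-- > 0;
}

static int wait_doorbell_clear(void)
{
	unsigned long max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;

	do {
		/* the real driver does udelay(1) here, so the count is in us */
		if (!doorbell_still_set())
			return 0;	/* cleared in time */
	} while (--max_wait_count);

	return -1;			/* timed out */
}

int main(void)
{
	printf("doorbell wait: %s\n", wait_doorbell_clear() ? "timeout" : "ok");
	return 0;
}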
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index d979f095aeda..2386bfb73c46 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -42,7 +42,7 @@ extern uint qedf_debug;
#define QEDF_LOG_LPORT 0x4000 /* lport logs */
#define QEDF_LOG_ELS 0x8000 /* ELS logs */
#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDF_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDF_LOG_TID 0x80000 /*
* FW TID context acquire
* free
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 59ca98f12afd..604856e72cfb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1926,6 +1926,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
return 0;
}
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_port_id(shost) = lport->port_id;
+}
+
static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
*shost)
{
@@ -1996,6 +2003,7 @@ static struct fc_function_template qedf_fc_transport_fn = {
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .get_host_port_id = qedf_get_host_port_id,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fc_get_host_speed,
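qedf now supplies a get_host_port_id hook so the FC transport refreshes fc_host_port_id from the lport each time it is read, rather than serving a stale cached value. A minimal sketch of the callback-in-template pattern with hypothetical stand-in types (not the real scsi_transport_fc structures):

#include <stdio.h>

/* Hypothetical stand-ins for Scsi_Host / fc_lport private data */
struct lport { unsigned int port_id; };
struct host  { struct lport *priv; unsigned int fc_port_id; };

struct fc_template {
	void (*get_host_port_id)(struct host *h);
	int show_host_port_id;
};

static void my_get_host_port_id(struct host *h)
{
	h->fc_port_id = h->priv->port_id;	/* refresh from the lport */
}

static const struct fc_template tmpl = {
	.get_host_port_id = my_get_host_port_id,
	.show_host_port_id = 1,
};

int main(void)
{
	struct lport lp = { .port_id = 0x010203 };
	struct host h = { .priv = &lp };

	tmpl.get_host_port_id(&h);	/* transport would do this on read */
	printf("port_id = %06x\n", h.fc_port_id);
	return 0;
}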
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 243acc8b520a..37d084086fd4 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -44,7 +44,7 @@ extern uint qedi_dbg_log;
#define QEDI_LOG_LPORT 0x4000 /* lport logs */
#define QEDI_LOG_ELS 0x8000 /* ELS logs */
#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDI_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
* free
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7259bce85e0e..ae97e2f310a3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -102,8 +102,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla8044_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
- } else
+ } else {
+ ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
+ }
break;
case 4:
if (IS_P3P_TYPE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6ffa9877c28b..460f443f6471 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -591,19 +591,23 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
- atomic_t ref_count;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+
uint32_t handle;
uint16_t flags;
uint16_t type;
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct srb *cmd_sp;
struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
@@ -2277,7 +2281,7 @@ typedef struct {
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
uint8_t fc4_type;
- uint8_t fc4f_nvme; /* nvme fc4 feature bits */
+ uint8_t fc4_features;
} sw_info_t;
/* FCP-4 types */
@@ -2445,7 +2449,7 @@ typedef struct fc_port {
u32 supported_classes;
uint8_t fc4_type;
- uint8_t fc4f_nvme;
+ uint8_t fc4_features;
uint8_t scan_state;
unsigned long last_queue_full;
@@ -2476,6 +2480,11 @@ typedef struct fc_port {
u16 n2n_chip_reset;
} fc_port_t;
+enum {
+ FC4_PRIORITY_NVME = 1,
+ FC4_PRIORITY_FCP = 2,
+};
+
#define QLA_FCPORT_SCAN 1
#define QLA_FCPORT_FOUND 2
@@ -4291,6 +4300,8 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+ uint8_t fc4_type_priority;
+
atomic_t zio_threshold;
uint16_t last_zio_threshold;
@@ -4816,6 +4827,23 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define NVME_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_NVME) \
+
+#define FCP_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_FCP) \
+
+#define NVME_ONLY_TARGET(fcport) \
+ (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \
+
+#define NVME_FCP_TARGET(fcport) \
+ (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
+
+#define NVME_TARGET(ha, fcport) \
+ ((NVME_FCP_TARGET(fcport) && \
+ (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+ NVME_ONLY_TARGET(fcport)) \
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 732bb871c433..59f6903e5abe 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -2101,4 +2101,6 @@ struct qla_fcp_prio_cfg {
#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
+#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d11416dcee4e..5b163ad85c34 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -917,4 +917,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5298ed10059f..446a9d6ba255 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -248,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
WWN_SIZE);
fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
- FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+ FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
@@ -2887,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
- uint8_t fcp_scsi_features = 0;
+ uint8_t fcp_scsi_features = 0, nvme_features = 0;
struct ct_arg arg;
for (i = 0; i < ha->max_fibre_devices; i++) {
@@ -2933,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fcp_scsi_features &= 0x0f;
- if (fcp_scsi_features)
- list[i].fc4_type = FC4_TYPE_FCP_SCSI;
- else
- list[i].fc4_type = FC4_TYPE_OTHER;
+ if (fcp_scsi_features) {
+ list[i].fc4_type = FS_FC4TYPE_FCP;
+ list[i].fc4_features = fcp_scsi_features;
+ }
- list[i].fc4f_nvme =
+ nvme_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- list[i].fc4f_nvme &= 0xf;
+ nvme_features &= 0xf;
+
+ if (nvme_features) {
+ list[i].fc4_type |= FS_FC4TYPE_NVME;
+ list[i].fc4_features = nvme_features;
+ }
}
/* Last device exit. */
@@ -3005,7 +3010,7 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == QLA_FUNCTION_TIMEOUT)
- return;
+ goto done;
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3435,6 +3440,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
struct event_arg ea;
+ uint8_t fc4_scsi_feat;
+ uint8_t fc4_nvme_feat;
ql_dbg(ql_dbg_disc, vha, 0x2133,
"Async done-%s res %x ID %x. %8phC\n",
@@ -3442,24 +3449,25 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fcport->flags &= ~FCF_ASYNC_SENT;
ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
* The format of the FC-4 Features object, as defined by the FC-4,
* shall be an array of 4-bit values, one for each type code value
*/
if (!res) {
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ if (fc4_scsi_feat & 0xf) {
/* w1 b00:03 */
- fcport->fc4_type =
- ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
- fcport->fc4_type &= 0xf;
- }
+ fcport->fc4_type = FS_FC4TYPE_FCP;
+ fcport->fc4_features = fc4_scsi_feat & 0xf;
+ }
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ if (fc4_nvme_feat & 0xf) {
/* w5 [00:03]/28h */
- fcport->fc4f_nvme =
- ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- fcport->fc4f_nvme &= 0xf;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ fcport->fc4_features = fc4_nvme_feat & 0xf;
}
}
@@ -3563,7 +3571,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
u8 recheck = 0;
u16 dup = 0, dup_cnt = 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
@@ -3579,11 +3587,23 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ goto out;
} else {
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Fabric scan failed on all retries.\n");
+ "%s: Fabric scan failed for %d retries.\n",
+ __func__, vha->scan.scan_retry);
+ /*
+ * Unable to scan any rports. The logout loop below
+ * will unregister all sessions.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ fcport->logout_on_delete = 0;
+ }
+ }
+ goto login_logout;
}
- goto out;
}
vha->scan.scan_retry = 0;
@@ -3661,6 +3681,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
dup_cnt);
}
+login_logout:
/*
* Logout all previous fabric dev marked lost, except FCP2 devices.
*/
@@ -4047,7 +4068,7 @@ done_free_sp:
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4061,7 +4082,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
u32 rspsz;
unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (!vha->flags.online)
@@ -4070,14 +4091,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags & SF_SCANNING) {
spin_unlock_irqrestore(&vha->work_lock, flags);
- ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: scan active\n", __func__);
return rval;
}
vha->scan.scan_flags |= SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
if (fc4_type == FC4_TYPE_FCP_SCSI) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
if (sp)
@@ -4132,7 +4154,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
}
sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s scan list size %d\n", __func__, vha->scan.size);
memset(vha->scan.l, 0, vha->scan.size);
@@ -4197,8 +4219,8 @@ done_free_sp:
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
if (vha->scan.scan_flags == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s: schedule\n", __func__);
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Scan scheduled.\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
schedule_delayed_work(&vha->scan.scan_work, 5);
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1d041313ec52..6c28f38f8021 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,7 +17,6 @@
#include <asm/prom.h>
#endif
-#include <target/target_core_base.h>
#include "qla_target.h"
/*
@@ -101,8 +100,22 @@ static void qla24xx_abort_iocb_timeout(void *data)
u32 handle;
unsigned long flags;
+ if (sp->cmd_sp)
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
+ sp->cmd_sp->handle, sp->cmd_sp->type,
+ sp->handle, sp->type);
+ else
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout 2 - hdl=%x, type=%x\n",
+ sp->handle, sp->type);
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
+ sp->cmd_sp))
+ qpair->req->outstanding_cmds[handle] = NULL;
+
/* removing the abort */
if (qpair->req->outstanding_cmds[handle] == sp) {
qpair->req->outstanding_cmds[handle] = NULL;
@@ -111,6 +124,9 @@ static void qla24xx_abort_iocb_timeout(void *data)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ if (sp->cmd_sp)
+ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+
abt->u.abt.comp_status = CS_TIMEOUT;
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
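On abort timeout the handler now walks outstanding_cmds and detaches both the original command (sp->cmd_sp) and the abort IOCB itself before completing them, so neither handle can be matched and completed a second time. A simplified model of that two-way cleanup (hypothetical types, fixed-size table):

#include <stdio.h>
#include <stddef.h>

struct srb { struct srb *cmd_sp; const char *name; };

#define NUM_CMDS 4

/* Drop both the abort srb and the command it was aborting from the table */
static void abort_timeout_cleanup(struct srb **outstanding, struct srb *abt)
{
	for (int h = 0; h < NUM_CMDS; h++) {
		if (abt->cmd_sp && outstanding[h] == abt->cmd_sp)
			outstanding[h] = NULL;	/* the original command */
		if (outstanding[h] == abt)
			outstanding[h] = NULL;	/* the abort itself */
	}
}

int main(void)
{
	struct srb cmd = { .cmd_sp = NULL, .name = "cmd" };
	struct srb abt = { .cmd_sp = &cmd, .name = "abort" };
	struct srb *outstanding[NUM_CMDS] = { NULL, &cmd, &abt, NULL };

	abort_timeout_cleanup(outstanding, &abt);
	for (int h = 0; h < NUM_CMDS; h++)
		printf("slot %d: %s\n", h,
		       outstanding[h] ? outstanding[h]->name : "empty");
	return 0;
}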
@@ -142,6 +158,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
sp->type = SRB_ABT_CMD;
sp->name = "abort";
sp->qpair = cmd_sp->qpair;
+ sp->cmd_sp = cmd_sp;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
@@ -328,7 +345,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
else
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x2072,
@@ -726,19 +743,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
current_login_state = e->current_login_state >> 4;
else
current_login_state = e->current_login_state & 0xf;
-
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+ fcport->fc4_type, id.b24, fcport->d_id.b24,
+ loop_id, fcport->loop_id);
switch (fcport->disc_state) {
case DSC_DELETE_PEND:
@@ -1135,19 +1150,18 @@ static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- if (res == QLA_FUNCTION_TIMEOUT) {
- dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
- sp->u.iocb_cmd.u.mbx.in_dma);
- return;
- }
-
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ if (res == QLA_FUNCTION_TIMEOUT)
+ goto done;
+
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
ea.sp = sp;
qla24xx_handle_gpdb_event(vha, &ea);
+done:
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1225,13 +1239,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x211b,
"Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
+ fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -1382,14 +1396,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
- ea->rc);
+ "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+ fcport->port_name, fcport->disc_state, pd->current_login_state,
+ fcport->fc4_type, ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
ls = pd->current_login_state >> 4;
else
ls = pd->current_login_state & 0xf;
@@ -1578,7 +1592,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post %s PRLI\n",
__func__, __LINE__, fcport->port_name,
- fcport->fc4f_nvme ? "NVME" : "FC");
+ NVME_TARGET(vha->hw, fcport) ? "NVME" :
+ "FC");
qla24xx_post_prli_work(vha, fcport);
}
break;
@@ -1701,6 +1716,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
/*
* RSCN(s) came in for this fcport, but the RSCN(s) was not able
* to be consumed by the fcport
@@ -1860,38 +1884,26 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
- if (ea->fcport->fc4f_nvme) {
+ /*
+ * Retry PRLI with other FC-4 type if failure occurred on dual
+ * FCP/NVMe port
+ */
+ if (NVME_FCP_TARGET(ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post fc4 prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- ea->fcport->fc4f_nvme = 0;
- qla24xx_post_prli_work(vha, ea->fcport);
- return;
+ "%s %d %8phC post %s prli\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
+ "NVMe" : "FCP");
+ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ else
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
}
- /* at this point both PRLI NVME & PRLI FCP failed */
- if (N2N_TOPO(vha->hw)) {
- if (ea->fcport->n2n_link_reset_cnt < 3) {
- ea->fcport->n2n_link_reset_cnt++;
- /*
- * remote port is not sending Plogi. Reset
- * link to kick start his state machine
- */
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- } else {
- ql_log(ql_log_warn, vha, 0x2119,
- "%s %d %8phC Unable to reconnect\n",
- __func__, __LINE__, ea->fcport->port_name);
- }
- } else {
- /*
- * switch connect. login failed. Take connection
- * down and allow relogin to retrigger
- */
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->keep_nport_handle = 0;
- qlt_schedule_sess_for_deletion(ea->fcport);
- }
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
}
}
@@ -1952,7 +1964,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (ea->fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2117,
"%s %d %8phC post prli\n",
__func__, __LINE__, ea->fcport->port_name);
@@ -2206,8 +2218,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
+ /* Let priority default to FCP, can be overridden by nvram_config */
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
ha->isp_ops->nvram_config(vha);
+ if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
+ ha->fc4_type_priority != FC4_PRIORITY_NVME)
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
+ ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
+ ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
+
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
ql_log(ql_log_info, vha, 0x0077,
@@ -5382,7 +5404,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
@@ -5710,11 +5732,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
new_fcport->fc4_type = swl[swl_idx].fc4_type;
new_fcport->nvme_flag = 0;
- new_fcport->fc4f_nvme = 0;
if (vha->flags.nvme_enabled &&
- swl[swl_idx].fc4f_nvme) {
- new_fcport->fc4f_nvme =
- swl[swl_idx].fc4f_nvme;
+ swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
ql_log(ql_log_info, vha, 0x2131,
"FOUND: NVME port %8phC as FC Type 28h\n",
new_fcport->port_name);
@@ -5770,7 +5789,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
- (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
@@ -5839,7 +5858,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
fcport->disc_state = DSC_GNL;
vha->fcport_count--;
@@ -5879,8 +5898,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
if (fcport->scan_state == QLA_FCPORT_SCAN) {
@@ -5903,7 +5921,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
}
}
- if (fcport->scan_state == QLA_FCPORT_FOUND)
+ if (fcport->scan_state == QLA_FCPORT_FOUND &&
+ (fcport->flags & FCF_LOGIN_NEEDED) != 0)
qla24xx_fcport_handle_login(vha, fcport);
}
return (rval);
@@ -8514,6 +8533,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8;
+ /* Determine NVMe/FCP priority for target ports */
+ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -9003,8 +9025,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
struct qla_hw_data *ha = qpair->hw;
qpair->delete_in_progress = 1;
- while (atomic_read(&qpair->ref_count))
- msleep(500);
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0c3d907af769..352aba4127f7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -307,3 +307,15 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
WRT_REG_DWORD(req->req_q_in, req->ring_index);
}
+
+static inline int
+qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
+{
+ uint32_t data;
+
+ data =
+ ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
+
+ return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
+}
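qla2xxx_get_fc4_priority reduces the NVRAM dual-personality flag to one bit: bit 6 of the byte at offset 0x196 selects FCP when set and NVMe when clear. A tiny sketch of the extraction against a fabricated NVRAM buffer:

#include <stdio.h>
#include <stdint.h>

enum { FC4_PRIORITY_NVME = 1, FC4_PRIORITY_FCP = 2 };

#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196

static int get_fc4_priority(const uint8_t *nvram)
{
	uint8_t data = nvram[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	/* bit 6 set -> FCP preferred, clear -> NVMe preferred */
	return (data >> 6) & 1 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

int main(void)
{
	uint8_t nvram[0x200] = { 0 };

	nvram[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET] = 1 << 6;
	printf("flag set:   %s\n",
	       get_fc4_priority(nvram) == FC4_PRIORITY_FCP ? "FCP" : "NVMe");

	nvram[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET] = 0;
	printf("flag clear: %s\n",
	       get_fc4_priority(nvram) == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
	return 0;
}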
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 518eb954cf42..b25f87ff8cde 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2740,6 +2740,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct scsi_qla_host *vha = sp->vha;
struct event_arg ea;
struct qla_work_evt *e;
+ struct fc_port *conflict_fcport;
+ port_id_t cid; /* conflict Nport id */
+ u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
"%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
@@ -2751,14 +2755,101 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- if (res) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
+ switch (fw_status[0]) {
+ case CS_DATA_UNDERRUN:
+ case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
- ea.data[0] = MBS_COMMAND_COMPLETE;
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(vha, &ea);
+ ea.rc = res;
+ qla_handle_els_plogi_done(vha, &ea);
+ break;
+
+ case CS_IOCB_ERROR:
+ switch (fw_status[1]) {
+ case LSC_SCODE_PORTID_USED:
+ lid = fw_status[2] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(fcport->port_name),
+ fcport->d_id, lid, &conflict_fcport);
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same
+ * loop_id & nport id; conflict
+ * fcport needs to finish cleanup
+ * before this fcport can proceed
+ * to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ fcport->loop_id = lid;
+ fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ case LSC_SCODE_NPORT_USED:
+ cid.b.domain = (fw_status[2] >> 16) & 0xff;
+ cid.b.area = (fw_status[2] >> 8) & 0xff;
+ cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, cid.b24);
+ set_bit(fcport->loop_id,
+ vha->hw->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xd046,
+ "Exchange starvation. Resetting RISC\n");
+ vha->hw->exch_starvation = 0;
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ /* fall through */
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
}
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
@@ -2792,11 +2883,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
return -ENOMEM;
}
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_PEND;
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
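The LSC_SCODE_NPORT_USED arm above unpacks a 24-bit FC N_Port ID (domain/area/al_pa) from fw_status[2]. A small stand-alone sketch of that decode, with a made-up conflict ID:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t fw_status2 = 0x00010203; /* hypothetical conflicting N_Port ID */
        uint8_t domain = (fw_status2 >> 16) & 0xff;
        uint8_t area = (fw_status2 >> 8) & 0xff;
        uint8_t al_pa = fw_status2 & 0xff;

        printf("conflict nport id %02x%02x%02x\n", domain, area, al_pa);
        return 0;
    }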
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 009fd5a33fcd..2601d7673c37 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1227,11 +1227,32 @@ global_port_update:
break;
case MBA_IDC_AEN:
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
- qla83xx_handle_8200_aen(vha, mb);
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->flags.fw_init_done = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) ||
+ (mb[2] & BIT_8)) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "MPI Heartbeat stop. FW dump needed\n");
+ ha->fw_dump_mpi = 1;
+ ha->isp_ops->fw_dump(vha, 1);
+ }
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (IS_QLA83XX(ha)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[5] = RD_REG_WORD(&reg24->mailbox5);
+ mb[6] = RD_REG_WORD(&reg24->mailbox6);
+ mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ qla83xx_handle_8200_aen(vha, mb);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ }
break;
case MBA_DPORT_DIAGNOSTICS:
@@ -2466,6 +2487,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ if (sp->abort)
+ sp->aborted = 1;
+ else
+ sp->completed = 1;
+
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
@@ -3624,7 +3650,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
ql_log(ql_log_info, vha, 0x0037,
- "Falling back-to MSI mode -%d.\n", ret);
+ "Falling back-to MSI mode -- ret=%d.\n", ret);
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
@@ -3632,13 +3658,13 @@ skip_msix:
goto skip_msi;
ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
- if (!ret) {
+ if (ret > 0) {
ql_dbg(ql_dbg_init, vha, 0x0038,
"MSI: Enabled.\n");
ha->flags.msi_enabled = 1;
} else
ql_log(ql_log_warn, vha, 0x0039,
- "Falling back-to INTa mode -- %d.\n", ret);
+ "Falling back-to INTa mode -- ret=%d.\n", ret);
skip_msi:
/* Skip INTx on ISP82xx. */
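The MSI hunk above also corrects a return-value check: pci_alloc_irq_vectors() returns the number of vectors allocated (at least min_vecs) on success or a negative errno on failure; it never returns 0, so the old "if (!ret)" could never see success and MSI was never marked enabled. A minimal sketch of the corrected pattern:

    #include <linux/pci.h>

    /* Request exactly one MSI vector; sketch of the corrected check. */
    static int enable_single_msi(struct pci_dev *pdev)
    {
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (nvec > 0)
            return 0;   /* success: nvec vectors allocated */
        return nvec;    /* negative errno: fall back to INTx */
    }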
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4a1f21c11758..0cf94f05f008 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1932,7 +1932,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(ha, fcport)) {
current_login_state = pd24->current_login_state >> 4;
last_login_state = pd24->last_login_state >> 4;
} else {
@@ -3899,8 +3899,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
+ fcport->fc4_type = FS_FC4TYPE_FCP;
if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
switch (fcport->disc_state) {
case DSC_DELETED:
@@ -6287,17 +6288,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
case QLA_SUCCESS:
ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
__func__, sp->name);
- sp->free(sp);
break;
default:
ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- sp->free(sp);
break;
}
- return rval;
-
done_free_sp:
sp->free(sp);
done:
@@ -6362,7 +6359,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
uint64_t zero = 0;
u8 current_login_state, last_login_state;
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
current_login_state = pd->current_login_state >> 4;
last_login_state = pd->last_login_state >> 4;
} else {
@@ -6397,8 +6394,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
- if (fcport->fc4f_nvme) {
- fcport->port_type = 0;
+ if (NVME_TARGET(vha->hw, fcport)) {
+ fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
fcport->port_type |= FCT_NVME_INITIATOR;
if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 238240984bc1..eabc5127174e 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -946,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
- goto done;
+ return rval;
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
@@ -962,7 +962,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
ql_dbg(ql_dbg_async, vha, 0xffff,
"%s: %s Failed submission. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ goto done;
}
ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
@@ -980,16 +980,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
case QLA_SUCCESS:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
__func__, sp->name);
- goto done_free_sp;
+ break;
default:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ break;
}
done:
- return rval;
-
-done_free_sp:
sp->free(sp);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 6cc19e060afc..941aa53363f5 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -224,8 +224,8 @@ static void qla_nvme_abort_work(struct work_struct *work)
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
- "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
- __func__, sp, sp->type, atomic_read(&sp->ref_count));
+ "%s Calling done on sp: %p, type: 0x%x\n",
+ __func__, sp, sp->type);
sp->done(sp, 0);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 726ad4cbf4a6..8b84bc4a6ac8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -698,11 +698,6 @@ void qla2x00_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -794,11 +789,6 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -903,7 +893,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
+
CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
@@ -985,18 +975,16 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
- sp->qpair = qpair;
rval = ha->isp_ops->start_scsi_mq(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
if (rval == QLA_INTERFACE_ERROR)
- goto qc24_fail_command;
+ goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
@@ -1008,6 +996,11 @@ qc24_host_busy_free_sp:
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+qc24_free_sp_fail_command:
+ sp->free(sp);
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+
qc24_fail_command:
cmd->scsi_done(cmd);
@@ -1184,16 +1177,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static int
-sp_get(struct srb *sp)
-{
- if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
- /* kref get fail */
- return ENXIO;
- else
- return 0;
-}
-
#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
* qla2x00_isp_reg_stat
@@ -1249,6 +1232,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint64_t lun;
int rval;
struct qla_hw_data *ha = vha->hw;
+ uint32_t ratov_j;
+ struct qla_qpair *qpair;
+ unsigned long flags;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1261,13 +1247,26 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
sp = scsi_cmd_priv(cmd);
+ qpair = sp->qpair;
- if (sp->fcport && sp->fcport->deleted)
+ if ((sp->fcport && sp->fcport->deleted) || !qpair)
return SUCCESS;
- /* Return if the command has already finished. */
- if (sp_get(sp))
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (sp->completed) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
+ }
+
+ if (sp->abort || sp->aborted) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return FAILED;
+ }
+
+ sp->abort = 1;
+ sp->comp = &comp;
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1276,47 +1275,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
+ /*
+ * Abort will release the original Command/sp from FW. Let the
+ * original command call scsi_done. In return, it will wake up
+ * this sleeping thread.
+ */
rval = ha->isp_ops->abort_command(sp);
+
ql_dbg(ql_dbg_taskm, vha, 0x8003,
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
+ /* Wait for the command completion. */
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- /*
- * The command has been aborted. That means that the firmware
- * won't report a completion.
- */
- sp->done(sp, DID_ABORT << 16);
- ret = SUCCESS;
- break;
- case QLA_FUNCTION_PARAMETER_ERROR: {
- /* Wait for the command completion. */
- uint32_t ratov = ha->r_a_tov/10;
- uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
-
- WARN_ON_ONCE(sp->comp);
- sp->comp = &comp;
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_dbg(ql_dbg_taskm, vha, 0xffff,
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
- __func__, ha->r_a_tov);
+ __func__, ha->r_a_tov/10);
ret = FAILED;
} else {
ret = SUCCESS;
}
break;
- }
default:
- /*
- * Either abort failed or abort and completion raced. Let
- * the SCSI core retry the abort in the former case.
- */
ret = FAILED;
break;
}
sp->comp = NULL;
- atomic_dec(&sp->ref_count);
+
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
@@ -1708,32 +1697,53 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
int rval;
+ bool ret_cmd;
+ uint32_t ratov_j;
- if (sp_get(sp))
+ if (qla2x00_chip_is_down(vha)) {
+ sp->done(sp, res);
return;
+ }
if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
(sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!qla2x00_isp_reg_stat(ha))) {
+ if (sp->comp) {
+ sp->done(sp, res);
+ return;
+ }
+
sp->comp = &comp;
+ sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
- rval = ha->isp_ops->abort_command(sp);
+ rval = ha->isp_ops->abort_command(sp);
+ /* Wait for command completion. */
+ ret_cmd = false;
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- sp->done(sp, res);
+ if (!wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+ __func__, ha->r_a_tov/10);
+ ret_cmd = true;
+ }
+ /* else the FW returned the SP to the driver */
break;
- case QLA_FUNCTION_PARAMETER_ERROR:
- wait_for_completion(&comp);
+ default:
+ ret_cmd = true;
break;
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- sp->comp = NULL;
+ if (ret_cmd && (!sp->completed || !sp->aborted))
+ sp->done(sp, res);
+ } else {
+ sp->done(sp, res);
}
-
- atomic_dec(&sp->ref_count);
}
static void
@@ -1755,7 +1765,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- req->outstanding_cmds[cnt] = NULL;
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1777,6 +1786,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
default:
break;
}
+ req->outstanding_cmds[cnt] = NULL;
}
}
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
@@ -3492,6 +3502,29 @@ disable_device:
return ret;
}
+static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
+{
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ struct qla_hw_data *ha;
+
+ if (!base_vha)
+ return;
+
+ ha = base_vha->hw;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
+ set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
+
+ /*
+ * Indicate device removal to prevent future board_disable
+ * and wait until any pending board_disable has completed.
+ */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
@@ -3508,7 +3541,7 @@ qla2x00_shutdown(struct pci_dev *pdev)
* Prevent future board_disable and wait
* until any pending board_disable has completed.
*/
- set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+ __qla_set_remove_flag(vha);
cancel_work_sync(&ha->board_disable);
if (!atomic_read(&pdev->enable_cnt))
@@ -3668,10 +3701,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha = base_vha->hw;
ql_log(ql_log_info, base_vha, 0xb079,
"Removing driver\n");
-
- /* Indicate device removal to prevent future board_disable and wait
- * until any pending board_disable has completed. */
- set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ __qla_set_remove_flag(base_vha);
cancel_work_sync(&ha->board_disable);
/*
@@ -4666,7 +4696,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->sfp_data = NULL;
if (ha->flt)
- dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ dma_free_coherent(&ha->pdev->dev,
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
ha->flt, ha->flt_dma);
ha->flt = NULL;
ha->flt_dma = 0;
@@ -5042,19 +5073,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
- fcport->fc4_type = FC4_TYPE_FCP_SCSI;
-
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
- fcport->fc4_type = FC4_TYPE_OTHER;
- fcport->fc4f_nvme = FC4_TYPE_NVME;
- }
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
- if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+ fcport->fc4_type = e->u.new_sess.fc4_type;
+ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
+ if (vha->flags.nvme_enabled)
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ }
} else {
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -5158,7 +5187,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->flags &= ~FCF_FABRIC_DEVICE;
fcport->keep_nport_handle = 1;
if (vha->flags.nvme_enabled) {
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type =
+ (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
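Both new abort paths in qla_os.c bound their wait on the same budget: four times R_A_TOV. Judging by the old "ratov = ha->r_a_tov/10" seconds conversion, ha->r_a_tov is held in 100 ms units; a sketch of the computation under that assumption:

    #include <linux/jiffies.h>

    /* 4 * R_A_TOV in jiffies, assuming r_a_tov is in 100 ms units */
    static unsigned long abort_wait_budget(unsigned int r_a_tov)
    {
        unsigned int msecs = r_a_tov / 10 * 4 * 1000;

        return msecs_to_jiffies(msecs);
    }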
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a06e56224a55..51b275a575a5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -463,7 +463,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
case IMMED_NOTIFY_TYPE:
{
- struct scsi_qla_host *host = vha;
+ struct scsi_qla_host *host;
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)pkt;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 294d77c02cdf..5b0c057def2b 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -10,6 +10,7 @@
#define ISPREG(vha) (&(vha)->hw->iobase->isp24)
#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha) IOBAR(ISPREG(vha))
+#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -261,6 +262,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ulong start = le32_to_cpu(ent->t262.start_addr);
ulong end = le32_to_cpu(ent->t262.end_addr);
ulong dwords;
+ int rc;
ql_dbg(ql_dbg_misc, vha, 0xd206,
"%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
@@ -308,7 +310,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
dwords = end - start + 1;
if (buf) {
buf += *len;
- qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
+ __func__, area, start, end);
+ return INVALID_ENTRY;
+ }
}
*len += dwords * sizeof(uint32_t);
done:
@@ -838,6 +846,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ent = qla27xx_find_entry(type)(vha, ent, buf, len);
if (!ent)
break;
+
+ if (ent == INVALID_ENTRY) {
+ *len = 0;
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Unable to capture FW dump");
+ goto bailout;
+ }
}
if (tmp->count)
@@ -847,6 +862,9 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
if (ent)
ql_dbg(ql_dbg_misc, vha, 0xd019,
"%s: missing end entry\n", __func__);
+
+bailout:
+ cpu_to_le32s(&tmp->count); /* endianize residual count */
}
static void
@@ -999,8 +1017,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
uint j;
ulong len;
void *buf = vha->hw->fw_dump;
+ uint count = vha->hw->fw_dump_mpi ? 2 : 1;
- for (j = 0; j < 2; j++, fwdt++, buf += len) {
+ for (j = 0; j < count; j++, fwdt++, buf += len) {
ql_log(ql_log_warn, vha, 0xd011,
"-> fwdt%u running...\n", j);
if (!fwdt->template) {
@@ -1010,7 +1029,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
len = qla27xx_execute_fwdt_template(vha,
fwdt->template, buf);
- if (len != fwdt->dump_size) {
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
ql_log(ql_log_warn, vha, 0xd013,
"-> fwdt%u fwdump residual=%+ld\n",
j, fwdt->dump_size - len);
@@ -1025,6 +1046,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
+bailout:
+ vha->hw->fw_dump_mpi = 0;
#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
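INVALID_ENTRY gives the template walker a failure value distinct from NULL, which already means "end of template". The stock kernel idiom for the same trick is ERR_PTR()/IS_ERR(); a sketch of how a walker could use it instead, with a hypothetical entry type:

    #include <linux/err.h>

    struct fwdt_entry;  /* opaque; stands in for qla27xx_fwdt_entry */

    static struct fwdt_entry *parse_entry(struct fwdt_entry *next, bool dump_ok)
    {
        if (!dump_ok)
            return ERR_PTR(-EIO);   /* failure, distinct from NULL */
        return next;                /* NULL here still means "done" */
    }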
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a8f2a953ceff..03bd3b712b77 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.19-k"
+#define QLA2XXX_VERSION "10.01.00.21-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index dac9a7013208..02636b4785c5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
- dma_free_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7a1b6c76f263..930e4803d888 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -186,7 +186,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
struct scsi_driver *drv;
unsigned int good_bytes;
- scsi_device_unbusy(sdev);
+ scsi_device_unbusy(sdev, cmd);
/*
* Clear the flags that say that the device/target/host is no longer
@@ -465,10 +465,14 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
+ if (vpd_buf->data[i] == 0x0)
+ scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
if (vpd_buf->data[i] == 0x80)
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
if (vpd_buf->data[i] == 0x83)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
+ if (vpd_buf->data[i] == 0x89)
+ scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
}
kfree(vpd_buf);
}
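The scsi_attach_vpd() loop above walks INQUIRY VPD page 0x00 (Supported VPD Pages): a 4-byte header followed by one supported page code per byte, which is why scanning starts at offset 4. A stand-alone sketch with a made-up response:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t vpd[] = {
            0x00, 0x00, 0x00, 0x04, /* header: disk, page 0x00, length 4 */
            0x00, 0x80, 0x83, 0x89, /* supported page codes */
        };

        for (size_t i = 4; i < sizeof(vpd); i++)
            printf("device supports VPD page 0x%02x\n", vpd[i]);
        return 0;
    }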
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d323523f5f9d..44cb054d5e66 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1025,7 +1025,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
int arr_len, unsigned int off_dst)
{
- int act_len, n;
+ unsigned int act_len, n;
struct scsi_data_buffer *sdb = &scp->sdb;
off_t skip = off_dst;
@@ -1039,7 +1039,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
- n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
+ n = scsi_bufflen(scp) - (off_dst + act_len);
scsi_set_resid(scp, min(scsi_get_resid(scp), n));
return 0;
}
@@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 91c007d26c1e..3e7a45d0daca 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -189,7 +189,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* active on the host/device.
*/
if (unbusy)
- scsi_device_unbusy(device);
+ scsi_device_unbusy(device, cmd);
/*
* Requeue this command. It will go before all other commands
@@ -321,20 +321,20 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
}
/*
- * Decrement the host_busy counter and wake up the error handler if necessary.
- * Avoid as follows that the error handler is not woken up if shost->host_busy
- * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
- * with an RCU read lock in this function to ensure that this function in its
- * entirety either finishes before scsi_eh_scmd_add() increases the
+ * Wake up the error handler if necessary. Avoid as follows that the error
+ * handler is not woken up if host in-flight requests number ==
+ * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
+ * with an RCU read lock in this function to ensure that this function in
+ * its entirety either finishes before scsi_eh_scmd_add() increases the
* host_failed counter or that it notices the shost state change made by
* scsi_eh_scmd_add().
*/
-static void scsi_dec_host_busy(struct Scsi_Host *shost)
+static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -344,12 +344,12 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
rcu_read_unlock();
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
@@ -430,9 +430,6 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
- atomic_read(&shost->host_busy) >= shost->can_queue)
- return true;
if (atomic_read(&shost->host_blocked) > 0)
return true;
if (shost->host_self_blocked)
@@ -1139,6 +1136,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries;
+ bool in_flight;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
@@ -1147,6 +1145,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
jiffies_at_alloc = cmd->jiffies_at_alloc;
retries = cmd->retries;
+ in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1158,6 +1157,8 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies_at_alloc;
cmd->retries = retries;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
scsi_add_cmd_to_list(cmd);
}
@@ -1367,16 +1368,14 @@ out_dec:
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ struct scsi_cmnd *cmd)
{
- unsigned int busy;
-
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
if (atomic_read(&shost->host_blocked) > 0) {
- if (busy)
+ if (scsi_host_busy(shost) > 0)
goto starved;
/*
@@ -1390,8 +1389,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
- goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1403,6 +1400,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
spin_unlock_irq(shost->host_lock);
}
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
return 1;
starved:
@@ -1411,7 +1410,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
return 0;
}
@@ -1665,7 +1664,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_RESOURCE;
if (!scsi_target_queue_ready(shost, sdev))
goto out_put_budget;
- if (!scsi_host_queue_ready(q, shost, sdev))
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1697,7 +1696,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
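The scsi_lib.c changes drop the shared shost->host_busy atomic from the hot path; each command instead carries an SCMD_STATE_INFLIGHT bit, and the rare readers count in-flight commands by walking the tag set. A sketch of such an iterator callback, modeled on the scsi_host_busy() helper this series builds on (treat the exact signature as illustrative):

    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>

    static bool scsi_host_check_in_flight(struct request *rq, void *data,
                                          bool reserved)
    {
        int *count = data;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

        if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
            (*count)++;     /* tally commands owned by the LLD */
        return true;        /* keep iterating */
    }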
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index c6ed0b12e807..c91fa3feb930 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -390,6 +390,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
const char *db_string = scsi_driverbyte_string(cmd->result);
+ unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
@@ -431,10 +432,15 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (db_string)
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s", db_string);
+ "driverbyte=%s ", db_string);
else
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x", driver_byte(cmd->result));
+ "driverbyte=0x%02x ",
+ driver_byte(cmd->result));
+
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "cmd_age=%lus", cmd_age);
+
out_printk:
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
scsi_log_release_buffer(logbuf);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index cc2859d76d81..3bff9f7aa684 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -87,7 +87,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
-extern void scsi_device_unbusy(struct scsi_device *sdev);
+extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index cc51f4756077..677b5c5403d2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -437,6 +437,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
+ struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
@@ -466,16 +467,24 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
sdev->request_queue = NULL;
mutex_lock(&sdev->inquiry_mutex);
+ vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
+ lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
+ if (vpd_pg0)
+ kfree_rcu(vpd_pg0, rcu);
if (vpd_pg83)
kfree_rcu(vpd_pg83, rcu);
if (vpd_pg80)
kfree_rcu(vpd_pg80, rcu);
+ if (vpd_pg89)
+ kfree_rcu(vpd_pg89, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -868,6 +877,8 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
+sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -1200,12 +1211,18 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct scsi_device *sdev = to_scsi_device(dev);
+ if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0)
+ return 0;
+
if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
return 0;
if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
return 0;
+ if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
+ return 0;
+
return S_IRUGO;
}
@@ -1248,8 +1265,10 @@ static struct attribute *scsi_sdev_attrs[] = {
};
static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
+ &dev_attr_vpd_pg89,
&dev_attr_inquiry,
NULL
};
@@ -1309,7 +1328,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
device_enable_async_suspend(&sdev->sdev_gendev);
scsi_autopm_get_target(starget);
pm_runtime_set_active(&sdev->sdev_gendev);
- pm_runtime_forbid(&sdev->sdev_gendev);
+ if (!sdev->rpm_autosuspend)
+ pm_runtime_forbid(&sdev->sdev_gendev);
pm_runtime_enable(&sdev->sdev_gendev);
scsi_autopm_put_target(starget);
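The release path above is the standard RCU retire pattern: swap the pointer out under the update-side lock, then free the old copy only after a grace period, since lockless readers may still hold it. Condensed to one page for clarity:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <scsi/scsi_device.h>

    /* Retire one VPD page following the pattern in the hunk above. */
    static void retire_vpd_page(struct scsi_device *sdev)
    {
        struct scsi_vpd *old;

        mutex_lock(&sdev->inquiry_mutex);
        /* swap under the update-side lock ... */
        old = rcu_replace_pointer(sdev->vpd_pg89, NULL,
                                  lockdep_is_held(&sdev->inquiry_mutex));
        mutex_unlock(&sdev->inquiry_mutex);
        /* ... free only after the readers' grace period */
        if (old)
            kfree_rcu(old, rcu);
    }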
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0f17e7dac1b0..ac35c301c792 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -9,7 +9,7 @@
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8]))
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
@@ -18,15 +18,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
@@ -36,17 +39,12 @@ static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba, txlen;
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be16(&cdb[7]);
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
@@ -61,19 +59,12 @@ static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u32 lba, txlen;
+
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
@@ -84,23 +75,13 @@ static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u64 lba;
+ u32 txlen;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
@@ -115,8 +96,8 @@ static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
+ u64 lba;
+ u32 ei_lbrt, txlen;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
@@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+ lba = get_unaligned_be64(&cdb[12]);
+ ei_lbrt = get_unaligned_be32(&cdb[20]);
+ txlen = get_unaligned_be32(&cdb[28]);
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
+ cmd, lba, txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
@@ -170,7 +137,7 @@ static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- unsigned int regions = cdb[7] << 8 | cdb[8];
+ unsigned int regions = get_unaligned_be16(&cdb[7]);
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
@@ -182,8 +149,8 @@ static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
+ u64 lba;
+ u32 alloc_len;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
@@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
+ lba = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
out:
trace_seq_putc(p, 0);
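The scsi_trace.c conversion is not just shorter: get_unaligned_be32() and friends read big-endian CDB fields at any alignment with an explicit width, whereas the deleted shift-and-OR chains promoted each u8 to int first, so an expression like "cdb[6] << 24" could sign-extend into the upper half of a 64-bit accumulator. Open-coded equivalent of the 32-bit helper, for reference:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static u32 be32_by_hand(const u8 *p)
    {
        /* matches get_unaligned_be32(p); the casts block sign extension */
        return ((u32)p[0] << 24) | ((u32)p[1] << 16) |
               ((u32)p[2] << 8) | (u32)p[3];
    }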
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ef138c57e2a6..182fd25c7c43 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1391,9 +1391,6 @@ static void sas_expander_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_expander_device *edev = rphy_to_expander_device(rphy);
- if (rphy->q)
- blk_cleanup_queue(rphy->q);
-
put_device(dev->parent);
kfree(edev);
}
@@ -1403,9 +1400,6 @@ static void sas_end_device_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_end_device *edev = rphy_to_end_device(rphy);
- if (rphy->q)
- blk_cleanup_queue(rphy->q);
-
put_device(dev->parent);
kfree(edev);
}
@@ -1634,8 +1628,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
}
sas_rphy_unlink(rphy);
- if (rphy->q)
- bsg_unregister_queue(rphy->q);
+ bsg_remove_queue(rphy->q);
transport_remove_device(dev);
device_del(dev);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 13925021473d..cea625906440 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -122,8 +122,6 @@ static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
-static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
-static void sd_print_result(const struct scsi_disk *, const char *, int);
static DEFINE_IDA(sd_index_ida);
@@ -3390,6 +3388,10 @@ static int sd_probe(struct device *dev)
}
blk_pm_runtime_init(sdp->request_queue, dev);
+ if (sdp->rpm_autosuspend) {
+ pm_runtime_set_autosuspend_delay(dev,
+ sdp->host->hostt->rpm_autosuspend_delay);
+ }
device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
@@ -3722,15 +3724,13 @@ static void __exit exit_sd(void)
module_init(init_sd);
module_exit(exit_sd);
-static void sd_print_sense_hdr(struct scsi_disk *sdkp,
- struct scsi_sense_hdr *sshdr)
+void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
scsi_print_sense_hdr(sdkp->device,
sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
}
-static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
- int result)
+void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
{
const char *hb_string = scsi_hostbyte_string(result);
const char *db_string = scsi_driverbyte_string(result);
@@ -3745,4 +3745,3 @@ static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
"%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
msg, host_byte(result), driver_byte(result));
}
-
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 42fd3f00e4a5..50fff0bf8c8e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -241,4 +241,7 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
#endif /* CONFIG_BLK_DEV_ZONED */
+void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr);
+void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result);
+
#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 0e5ede48f045..e0bd4cf17230 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -80,9 +80,11 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
timeout, SD_MAX_RETRIES, NULL);
if (result) {
sd_printk(KERN_ERR, sdkp,
- "REPORT ZONES lba %llu failed with %d/%d\n",
- (unsigned long long)lba,
- host_byte(result), driver_byte(result));
+ "REPORT ZONES start lba %llu failed\n", lba);
+ sd_print_result(sdkp, "REPORT ZONES", result);
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
return -EIO;
}
@@ -412,8 +414,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- blk_queue_chunk_sectors(sdkp->disk->queue,
- logical_to_sectors(sdkp->device, zone_blocks));
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
blk_queue_required_elevator_features(sdkp->disk->queue,
ELEVATOR_F_ZBD_SEQ_WRITE);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9e4ef22b3579..160748ad9c0f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -429,18 +429,26 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
- if (!access_ok(buf, count))
- return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
- old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
- if (!old_hdr)
- return -ENOMEM;
- if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
- retval = -EFAULT;
- goto free_old_hdr;
- }
+ old_hdr = memdup_user(buf, SZ_SG_HEADER);
+ if (IS_ERR(old_hdr))
+ return PTR_ERR(old_hdr);
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
+ /*
+ * This is stupid.
+ *
+ * We're copying the whole sg_io_hdr_t from user
+ * space just to get the 'pack_id' field. But the
+ * field is at different offsets for the compat
+ * case, so we'll use "get_sg_io_hdr()" to copy
+ * the whole thing and convert it.
+ *
+ * We could do something like just calculating the
+ * offset based on 'in_compat_syscall()', but the
+ * 'compat_sg_io_hdr' definition is in the wrong
+ * place for that.
+ */
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
@@ -537,7 +545,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
- if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
+ if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
@@ -623,11 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
- if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
@@ -636,13 +642,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
+ buf += SZ_SG_HEADER;
+ if (get_user(opcode, buf))
+ return -EFAULT;
+
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
- buf += SZ_SG_HEADER;
- __get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
@@ -685,7 +693,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (__copy_from_user(cmnd, buf, cmd_size))
+ if (copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
@@ -720,8 +728,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
if (count < SZ_SG_IO_HDR)
return -EINVAL;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
@@ -759,11 +765,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
- if (!access_ok(hp->cmdp, hp->cmd_len)) {
- sg_remove_request(sfp, srp);
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
- }
- if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
+ if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
@@ -940,8 +942,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
- if (!access_ok(p, SZ_SG_IO_HDR))
- return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
@@ -986,26 +986,21 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
- if (!access_ok(p, sizeof (sg_scsi_id_t)))
- return -EFAULT;
- else {
- sg_scsi_id_t __user *sg_idp = p;
+ {
+ sg_scsi_id_t v;
if (atomic_read(&sdp->detaching))
return -ENODEV;
- __put_user((int) sdp->device->host->host_no,
- &sg_idp->host_no);
- __put_user((int) sdp->device->channel,
- &sg_idp->channel);
- __put_user((int) sdp->device->id, &sg_idp->scsi_id);
- __put_user((int) sdp->device->lun, &sg_idp->lun);
- __put_user((int) sdp->device->type, &sg_idp->scsi_type);
- __put_user((short) sdp->device->host->cmd_per_lun,
- &sg_idp->h_cmd_per_lun);
- __put_user((short) sdp->device->queue_depth,
- &sg_idp->d_queue_depth);
- __put_user(0, &sg_idp->unused[0]);
- __put_user(0, &sg_idp->unused[1]);
+ memset(&v, 0, sizeof(v));
+ v.host_no = sdp->device->host->host_no;
+ v.channel = sdp->device->channel;
+ v.scsi_id = sdp->device->id;
+ v.lun = sdp->device->lun;
+ v.scsi_type = sdp->device->type;
+ v.h_cmd_per_lun = sdp->device->host->cmd_per_lun;
+ v.d_queue_depth = sdp->device->queue_depth;
+ if (copy_to_user(p, &v, sizeof(sg_scsi_id_t)))
+ return -EFAULT;
return 0;
}
case SG_SET_FORCE_PACK_ID:
@@ -1015,20 +1010,16 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
- if (!access_ok(ip, sizeof (int)))
- return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
- __put_user(srp->header.pack_id, ip);
- return 0;
+ return put_user(srp->header.pack_id, ip);
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- __put_user(-1, ip);
- return 0;
+ return put_user(-1, ip);
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
@@ -2017,12 +2008,12 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
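The sg.c cleanup leans on the fact that copy_{to,from}_user() already validate the user pointer, so the scattered access_ok() checks paired with the unchecked __copy variants were redundant, and fragile once the pairing drifts. A conceptual model of the relationship, not the real implementation:

    #include <linux/uaccess.h>

    /* returns the number of bytes NOT copied, like copy_from_user() */
    static unsigned long checked_copy_from_user(void *to,
            const void __user *from, unsigned long n)
    {
        if (!access_ok(from, n))
            return n;   /* fault everything: no bytes copied */
        return __copy_from_user(to, from, n);
    }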
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 79d2af36f655..1129fe7a27ed 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -276,7 +276,9 @@ struct pqi_raid_path_request {
u8 reserved4 : 2;
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
- u8 cdb[32];
+ u8 cdb[16];
+ u8 reserved6[12];
+ __le32 timeout;
struct pqi_sg_descriptor
sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -385,7 +387,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[4];
+ u8 reserved[2];
+ __le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
__le16 outbound_queue_id_to_manage;
@@ -445,7 +448,7 @@ struct pqi_vendor_general_response {
struct pqi_ofa_memory {
__le64 signature; /* "OFA_QRM" */
- __le16 version; /* version of this struct(1 = 1st version) */
+ __le16 version; /* version of this struct (1 = 1st version) */
u8 reserved[62];
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
@@ -761,6 +764,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_OFA 0
#define PQI_FIRMWARE_FEATURE_SMP 1
#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -826,10 +831,17 @@ union pqi_reset_register {
struct report_lun_header {
__be32 list_length;
- u8 extended_response;
+ u8 flags;
u8 reserved[3];
};
+/* for flags field of struct report_lun_header */
+#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0)
+#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5)
+#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6)
+
+#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1)
+
struct report_log_lun_extended_entry {
u8 lunid[8];
u8 volume_id[16];
@@ -851,7 +863,7 @@ struct report_phys_lun_extended_entry {
};
/* for device_flags field of struct report_phys_lun_extended_entry */
-#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8
struct report_phys_lun_extended {
struct report_lun_header header;
@@ -864,7 +876,7 @@ struct raid_map_disk_data {
u8 reserved[2];
};
-/* constants for flags field of RAID map */
+/* for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED 0x1
struct raid_map {
@@ -907,7 +919,6 @@ struct pqi_scsi_dev {
u8 scsi3addr[8];
__be64 wwid;
u8 volume_id[16];
- u8 unique_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
u8 is_expander_smp_device : 1;
@@ -954,13 +965,9 @@ struct pqi_scsi_dev {
};
/* VPD inquiry pages */
-#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
-#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
-#define SCSI_VPD_HEADER_SZ 4
-#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */
#define VPD_PAGE (1 << 8)
@@ -1130,13 +1137,16 @@ struct pqi_ctrl_info {
struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool in_shutdown;
+ bool block_device_reset;
bool in_ofa;
+ bool in_shutdown;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
u8 soft_reset_handshake_supported : 1;
+ u8 raid_iu_timeout_supported : 1;
+ u8 tmf_iu_timeout_supported : 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1170,9 +1180,10 @@ struct pqi_ctrl_info {
spinlock_t raid_bypass_retry_list_lock;
struct work_struct raid_bypass_retry_work;
- struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
- dma_addr_t pqi_ofa_mem_dma_handle;
- void **pqi_ofa_chunk_virt_addr;
+ struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+ dma_addr_t pqi_ofa_mem_dma_handle;
+ void **pqi_ofa_chunk_virt_addr;
+ atomic_t sync_cmds_outstanding;
};
enum pqi_ctrl_mode {
@@ -1191,10 +1202,6 @@ enum pqi_ctrl_mode {
#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define CISS_GET_RAID_MAP 0xc8
-/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
-#define CISS_REPORT_LOG_EXTENDED 0x1
-#define CISS_REPORT_PHYS_EXTENDED 0x2
-
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
@@ -1208,7 +1215,7 @@ enum pqi_ctrl_mode {
#define BMIC_SET_DIAG_OPTIONS 0xf4
#define BMIC_SENSE_DIAG_OPTIONS 0xf5
-#define CSMI_CC_SAS_SMP_PASSTHRU 0X17
+#define CSMI_CC_SAS_SMP_PASSTHRU 0x17
#define SA_FLUSH_CACHE 0x1
@@ -1244,10 +1251,12 @@ struct bmic_sense_subsystem_info {
u8 ctrl_serial_number[16];
};
-#define SA_EXPANDER_SMP_DEVICE 0x05
-#define SA_CONTROLLER_DEVICE 0x07
-/*SCSI Invalid Device Type for SAS devices*/
-#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
+/* constants for device_type field */
+#define SA_DEVICE_TYPE_SATA 0x1
+#define SA_DEVICE_TYPE_SAS 0x2
+#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_CONTROLLER 0x7
+#define SA_DEVICE_TYPE_NVME 0x9
struct bmic_identify_physical_device {
u8 scsi_bus; /* SCSI Bus number on controller */
@@ -1273,7 +1282,7 @@ struct bmic_identify_physical_device {
__le32 rpm; /* drive rotational speed in RPM */
u8 device_type; /* type of drive */
u8 sata_version; /* only valid when device_type = */
- /* BMIC_DEVICE_TYPE_SATA */
+ /* SA_DEVICE_TYPE_SATA */
__le64 big_total_block_count;
__le64 ris_starting_lba;
__le32 ris_size;
@@ -1396,18 +1405,6 @@ struct bmic_diag_options {
#pragma pack()
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
- void *hostdata = shost_priv(shost);
-
- return *((struct pqi_ctrl_info **)hostdata);
-}
-
-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- return !ctrl_info->controller_online;
-}
-
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
atomic_inc(&ctrl_info->num_busy_threads);
@@ -1418,9 +1415,11 @@ static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
atomic_dec(&ctrl_info->num_busy_threads);
}
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
- return ctrl_info->block_requests;
+ void *hostdata = shost_priv(shost);
+
+ return *((struct pqi_ctrl_info **)hostdata);
}
void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ea5409bebf57..7b7ef3acb504 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.8-026"
+#define DRIVER_VERSION "1.2.10-025"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 8
-#define DRIVER_REVISION 26
+#define DRIVER_RELEASE 10
+#define DRIVER_REVISION 25
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -211,6 +211,11 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
return scsi3addr[2] != 0;
}
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
@@ -235,6 +240,21 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_device_reset = true;
+}
+
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_device_reset;
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
@@ -331,6 +351,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
return device->in_remove && !ctrl_info->in_shutdown;
}
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->in_shutdown;
+}
+
static inline void pqi_schedule_rescan_worker_with_delay(
struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
@@ -360,6 +390,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_work_sync(&ctrl_info->event_work);
+}
+
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -377,7 +412,7 @@ static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
}
static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+ u8 clear)
{
u8 status;
@@ -462,9 +497,9 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+ cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
else
- cdb[1] = CISS_REPORT_LOG_EXTENDED;
+ cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -567,13 +602,12 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info,
- unsigned long timeout_msecs)
+ u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+ struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
int rc;
- enum dma_data_direction dir;
struct pqi_raid_path_request request;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
cmd, scsi3addr, buffer,
@@ -581,44 +615,44 @@ static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ error_info, timeout_msecs);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
-/* Helper functions for pqi_send_scsi_raid_request */
+/* helper functions for pqi_send_scsi_raid_request */
static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length)
+ u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length,
- struct pqi_raid_error_info *error_info)
+ u8 cmd, void *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}
-
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
- struct bmic_identify_controller *buffer)
+ struct bmic_identify_controller *buffer)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
- buffer, sizeof(*buffer));
+ buffer, sizeof(*buffer));
}
static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
- struct bmic_sense_subsystem_info *sense_info)
+ struct bmic_sense_subsystem_info *sense_info)
{
return pqi_send_ctrl_raid_request(ctrl_info,
- BMIC_SENSE_SUBSYSTEM_INFORMATION,
- sense_info, sizeof(*sense_info));
+ BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+ sizeof(*sense_info));
}
static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
@@ -628,83 +662,9 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u16 vpd_page)
-{
- int rc;
- int i;
- int pages;
- unsigned char *buf, bufsize;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (!buf)
- return false;
-
- /* Get the size of the page list first */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, SCSI_VPD_HEADER_SZ);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
- bufsize = pages + SCSI_VPD_HEADER_SZ;
- else
- bufsize = 255;
-
- /* Get the whole VPD page list */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, bufsize);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- for (i = 1; i <= pages; i++)
- if (buf[3 + i] == vpd_page)
- goto exit_supported;
-
-exit_unsupported:
- kfree(buf);
- return false;
-
-exit_supported:
- kfree(buf);
- return true;
-}
-
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u8 *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
- return 1; /* function not supported */
-
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_DEVICE_ID,
- buf, 64);
- if (rc == 0) {
- if (buflen > 16)
- buflen = 16;
- memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
- }
-
- kfree(buf);
-
- return rc;
-}
-
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
- struct bmic_identify_physical_device *buffer,
- size_t buffer_length)
+ struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
int rc;
enum dma_data_direction dir;
@@ -725,6 +685,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
0, NULL, NO_TIMEOUT);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
@@ -763,7 +724,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, error_info);
}
-#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+#define PQI_FETCH_PTRAID_DATA (1 << 31)
static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
@@ -775,14 +736,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
return -ENOMEM;
rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ diag, sizeof(*diag));
if (rc)
goto out;
diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
- rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+ sizeof(*diag));
+
out:
kfree(diag);
@@ -793,7 +755,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
- buffer, buffer_length);
+ buffer, buffer_length);
}
#pragma pack(1)
@@ -946,7 +908,7 @@ static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ buffer_length);
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1280,9 +1242,9 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
-#define RAID_BYPASS_STATUS 4
-#define RAID_BYPASS_CONFIGURED 0x1
-#define RAID_BYPASS_ENABLED 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
bypass_status = buffer[RAID_BYPASS_STATUS];
device->raid_bypass_configured =
@@ -1385,14 +1347,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
- if (pqi_get_device_id(ctrl_info, device->scsi3addr,
- device->unique_id, sizeof(device->unique_id)) < 0)
- dev_warn(&ctrl_info->pci_dev->dev,
- "Can't get device id for scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target,
- device->lun);
-
out:
kfree(buffer);
@@ -1413,6 +1367,7 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
return;
}
+
device->box_index = id_phys->box_index;
device->phys_box_on_bus = id_phys->phys_box_on_bus;
device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
@@ -1828,7 +1783,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device = new_device_list[i];
find_result = pqi_scsi_find_entry(ctrl_info, device,
- &matching_device);
+ &matching_device);
switch (find_result) {
case DEVICE_SAME:
@@ -2057,9 +2012,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = -ENOMEM;
goto out;
}
- if (pqi_hide_vsep) {
- int i;
+ if (pqi_hide_vsep) {
for (i = num_physicals - 1; i >= 0; i--) {
phys_lun_ext_entry =
&physdev_list->lun_entries[i];
@@ -2132,7 +2086,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->is_physical_device = is_physical_device;
if (is_physical_device) {
if (phys_lun_ext_entry->device_type ==
- SA_EXPANDER_SMP_DEVICE)
+ SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
@@ -2169,16 +2123,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (device->is_physical_device) {
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
- REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+ CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun_ext_entry->aio_handle) {
device->aio_enabled = true;
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
}
-
- pqi_get_physical_disk_info(ctrl_info,
- device, id_phys);
-
+ pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -3158,7 +3109,7 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+ enum pqi_soft_reset_status reset_status)
{
int rc;
@@ -3202,8 +3153,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
if (event_id == PQI_EVENT_OFA_QUIESCE) {
dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
+ "Received Online Firmware Activation quiesce event for controller %u\n",
+ ctrl_info->ctrl_id);
pqi_ofa_ctrl_quiesce(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
if (ctrl_info->soft_reset_handshake_supported) {
@@ -3223,8 +3174,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
pqi_ofa_free_host_buffer(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "Online Firmware Activation(%u) cancel reason : %u\n",
+ ctrl_info->ctrl_id, event->ofa_cancel_reason);
}
mutex_unlock(&ctrl_info->ofa_mutex);
@@ -3403,7 +3354,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+ bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3841,7 +3792,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
&pqi_registers->admin_oq_pi_addr);
reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
- (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+ (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
@@ -4048,8 +3999,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
- *error_info)
+static int pqi_process_raid_io_error_synchronous(
+ struct pqi_raid_error_info *error_info)
{
int rc = -EIO;
@@ -4122,6 +4073,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
goto out;
}
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4168,6 +4121,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
up(&ctrl_info->sync_request_sem);
@@ -4665,11 +4619,11 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle,
- GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -5402,7 +5356,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
pqi_ctrl_busy(ctrl_info);
if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info)) {
+ pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5419,7 +5373,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
@@ -5650,6 +5604,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5658,7 +5624,8 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 10
+#define PQI_LUN_RESET_TIMEOUT_SECS 30
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
@@ -5667,7 +5634,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
rc = 0;
break;
}
@@ -5704,6 +5671,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+ if (ctrl_info->tmf_iu_timeout_supported)
+ put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
+ &request->timeout);
pqi_start_io(ctrl_info,
&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
@@ -5733,7 +5703,7 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
for (retries = 0;;) {
rc = pqi_lun_reset(ctrl_info, device);
- if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
+ if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -5787,17 +5757,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
shost->host_no, device->bus, device->target, device->lun);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "controller %u offlined - cannot send device reset\n",
- ctrl_info->ctrl_id);
+ if (pqi_ctrl_offline(ctrl_info) ||
+ pqi_device_reset_blocked(ctrl_info)) {
rc = FAILED;
goto out;
}
pqi_wait_until_ofa_finished(ctrl_info);
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
rc = pqi_device_reset(ctrl_info, device);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
dev_err(&ctrl_info->pci_dev->dev,
@@ -6066,6 +6036,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le16(iu_length, &request.header.iu_length);
+ if (ctrl_info->raid_iu_timeout_supported)
+ put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
+
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
@@ -6119,7 +6092,7 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info))
+ if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
return -EBUSY;
switch (cmd) {
@@ -6160,14 +6133,8 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
static ssize_t pqi_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
-
- shost = class_to_shost(dev);
- ctrl_info = shost_to_hba(shost);
-
- return snprintf(buffer, PAGE_SIZE,
- "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6283,7 +6250,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned char uid[16];
+ u8 unique_id[16];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6296,16 +6263,22 @@ static ssize_t pqi_unique_id_show(struct device *dev,
flags);
return -ENODEV;
}
- memcpy(uid, device->unique_id, sizeof(uid));
+
+ if (device->is_physical_device) {
+ memset(unique_id, 0, 8);
+ memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+ } else {
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return snprintf(buffer, PAGE_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
- uid[0], uid[1], uid[2], uid[3],
- uid[4], uid[5], uid[6], uid[7],
- uid[8], uid[9], uid[10], uid[11],
- uid[12], uid[13], uid[14], uid[15]);
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
@@ -6328,6 +6301,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
flags);
return -ENODEV;
}
+
memcpy(lunid, device->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6335,7 +6309,8 @@ static ssize_t pqi_lunid_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
-#define MAX_PATHS 8
+#define MAX_PATHS 8
+
static ssize_t pqi_path_info_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -6347,9 +6322,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
int output_len = 0;
u8 box;
u8 bay;
- u8 path_map_index = 0;
+ u8 path_map_index;
char *active;
- unsigned char phys_connector[2];
+ u8 phys_connector[2];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6365,7 +6340,7 @@ static ssize_t pqi_path_info_show(struct device *dev,
bay = device->bay;
for (i = 0; i < MAX_PATHS; i++) {
- path_map_index = 1<<i;
+ path_map_index = 1 << i;
if (i == device->active_path_index)
active = "Active";
else if (device->path_map & path_map_index)
@@ -6416,10 +6391,10 @@ end_buffer:
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
return output_len;
}
-
static ssize_t pqi_sas_address_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6440,6 +6415,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
flags);
return -ENODEV;
}
+
sas_address = device->sas_address;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6844,6 +6820,27 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
firmware_feature->feature_name);
}
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
+ ctrl_info->soft_reset_handshake_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+ ctrl_info->raid_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+ ctrl_info->tmf_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ }
+
+ pqi_firmware_feature_status(ctrl_info, firmware_feature);
+}
+
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
@@ -6867,7 +6864,17 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
{
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
- .feature_status = pqi_firmware_feature_status,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "TMF IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
},
};
@@ -6921,7 +6928,6 @@ static void pqi_process_firmware_features(
return;
}
- ctrl_info->soft_reset_handshake_supported = false;
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
@@ -6929,10 +6935,6 @@ static void pqi_process_firmware_features(
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].enabled = true;
- if (pqi_firmware_features[i].feature_bit ==
- PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
- ctrl_info->soft_reset_handshake_supported =
- true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -7074,13 +7076,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- rc = pqi_force_sis_mode(ctrl_info);
- if (rc)
- return rc;
+ if (reset_devices) {
+ sis_soft_reset(ctrl_info);
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ } else {
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+ }
/*
* Wait until the controller is ready to start accepting SIS
@@ -7386,7 +7395,7 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining product detail\n");
+ "error obtaining product details\n");
return rc;
}
@@ -7514,6 +7523,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
+ atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7721,6 +7731,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
dev_err(dev, "Failed to allocate host buffer of size = %u",
bytes_requested);
}
+
+ return;
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
@@ -7787,8 +7799,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
0, NULL, NO_TIMEOUT);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
@@ -7956,28 +7966,73 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
pqi_remove_ctrl(ctrl_info);
}
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+ scmd = io_request->scmd;
+ WARN_ON(scmd != NULL); /* IO command from SML */
+ WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
+ }
+}
+
static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
ctrl_info = pci_get_drvdata(pci_dev);
- if (!ctrl_info)
- goto error;
+ if (!ctrl_info) {
+ dev_err(&pci_dev->dev,
+ "cache could not be flushed\n");
+ return;
+ }
+
+ pqi_disable_events(ctrl_info);
+ pqi_wait_until_ofa_finished(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_event_worker(ctrl_info);
+
+ pqi_ctrl_shutdown_start(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending I/O failed\n");
+ return;
+ }
+
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
- pqi_free_interrupts(ctrl_info);
- pqi_reset(ctrl_info);
- if (rc == 0)
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache\n");
+
+ pqi_ctrl_block_requests(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending sync cmds failed\n");
return;
+ }
-error:
- dev_warn(&pci_dev->dev,
- "unable to flush controller cache\n");
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
@@ -8686,6 +8741,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
cdb) != 32);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ timeout) != 60);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
sg_descriptors) != 64);
BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
@@ -8840,6 +8897,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
nexus_id) != 10);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ timeout) != 14);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
lun_number) != 16);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
protocol_specific) != 24);
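The sync_cmds_outstanding counter introduced throughout this file pairs two halves: every synchronous request is bracketed by an inc/dec, and pqi_shutdown() later polls the counter down to zero via pqi_ctrl_wait_for_pending_sync_cmds(). A condensed sketch of the caller-side half, with do_synchronous_command() as a hypothetical stand-in for the real submission path:

static int example_sync_cmd(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	atomic_inc(&ctrl_info->sync_cmds_outstanding);
	rc = do_synchronous_command(ctrl_info);	/* hypothetical stand-in */
	atomic_dec(&ctrl_info->sync_cmds_outstanding);

	return rc;
}

The same bracketing appears directly in pqi_eh_device_reset_handler() above, so device resets are counted alongside ordinary synchronous commands.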
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 6776dfc1d317..b7289112455c 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (pqi_sas_phy->added_to_port)
list_del(&pqi_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(pqi_sas_phy);
}
@@ -312,7 +312,6 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy)
static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
u64 *identifier)
{
-
int rc;
unsigned long flags;
struct Scsi_Host *shost;
@@ -361,7 +360,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
}
}
- if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
+ if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) {
rc = -EINVAL;
goto out;
}
@@ -382,12 +381,10 @@ out:
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return rc;
-
}
static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
{
-
int rc;
unsigned long flags;
struct pqi_ctrl_info *ctrl_info;
@@ -482,7 +479,6 @@ pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
req_size -= SMP_CRC_FIELD_LENGTH;
put_unaligned_le32(req_size, &parameters->request_length);
-
put_unaligned_le32(resp_size, &parameters->response_length);
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -512,12 +508,12 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
int rc;
- struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ struct pqi_ctrl_info *ctrl_info;
struct bmic_csmi_smp_passthru_buffer *smp_buf;
struct pqi_raid_error_info error_info;
unsigned int reslen = 0;
- pqi_ctrl_busy(ctrl_info);
+ ctrl_info = shost_to_hba(shost);
if (job->reply_payload.payload_len == 0) {
rc = -ENOMEM;
@@ -539,16 +535,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
}
- if (pqi_ctrl_offline(ctrl_info)) {
- rc = -ENXIO;
- goto out;
- }
-
- if (pqi_ctrl_blocked(ctrl_info)) {
- rc = -EBUSY;
- goto out;
- }
-
smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
if (!smp_buf) {
rc = -ENOMEM;
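The sas_phy_free() to sas_phy_delete() change tracks a rule of the SAS transport class: sas_phy_free() is only valid for a phy that was never successfully added with sas_phy_add(), while an added phy must be torn down with sas_phy_delete(), which also removes it from sysfs. A sketch of the split, with hypothetical surrounding code:

static int example_phy_lifecycle(struct device *parent, int index)
{
	struct sas_phy *phy;

	phy = sas_phy_alloc(parent, index);
	if (!phy)
		return -ENOMEM;

	if (sas_phy_add(phy)) {
		sas_phy_free(phy);	/* never added: plain free */
		return -ENODEV;
	}

	/* ... phy in use; once added, teardown must use: */
	sas_phy_delete(phy);		/* removes from transport and frees */

	return 0;
}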
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 955e4c938d49..701b842296f0 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
@@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0b845ab7c3bf..d14c2243e02a 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -132,6 +132,16 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_TI_J721E
+ tristate "TI glue layer for Cadence UFS Controller"
+ depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
+ help
+ This selects the driver for the TI glue layer for the Cadence
+ UFS Host Controller IP.
+
+ Select this if you have a TI platform with a UFS controller.
+ If unsure, say N.
+
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
depends on SCSI_UFSHCD
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2a9097939bcb..94c6c5d7334b 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
new file mode 100644
index 000000000000..5216d228cdd9
--- /dev/null
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define TI_UFS_SS_CTRL 0x4
+#define TI_UFS_SS_RST_N_PCS BIT(0)
+#define TI_UFS_SS_CLK_26MHZ BIT(4)
+
+static int ti_j721e_ufs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long clk_rate;
+ void __iomem *regbase;
+ struct clk *clk;
+ u32 reg = 0;
+ int ret;
+
+ regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ /* Select MPHY refclk frequency */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Cannot claim MPHY clock.\n");
+ return PTR_ERR(clk);
+ }
+ clk_rate = clk_get_rate(clk);
+ if (clk_rate == 26000000)
+ reg |= TI_UFS_SS_CLK_26MHZ;
+ devm_clk_put(dev, clk);
+
+ /* Take UFS slave device out of reset */
+ reg |= TI_UFS_SS_RST_N_PCS;
+ writel(reg, regbase + TI_UFS_SS_CTRL);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+ dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes %d\n", ret);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+static int ti_j721e_ufs_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_j721e_ufs_of_match[] = {
+ {
+ .compatible = "ti,j721e-ufs",
+ },
+ { },
+};
+
+static struct platform_driver ti_j721e_ufs_driver = {
+ .probe = ti_j721e_ufs_probe,
+ .remove = ti_j721e_ufs_remove,
+ .driver = {
+ .name = "ti-j721e-ufs",
+ .of_match_table = ti_j721e_ufs_of_match,
+ },
+};
+module_platform_driver(ti_j721e_ufs_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("TI UFS host controller glue driver");
+MODULE_LICENSE("GPL v2");
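The new glue driver holds a runtime-PM reference from probe to remove so the UFS subsystem stays powered for the lifetime of the child controller populated by of_platform_populate(). A minimal sketch of the get/put pairing this relies on, with do_setup() as a hypothetical stand-in; a fully symmetric version also drops the reference on every early-exit path:

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* get_sync bumped the count */
		pm_runtime_disable(dev);
		return ret;
	}

	ret = do_setup(pdev);			/* hypothetical stand-in */
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}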
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 6bbb1679bb91..5d6487350a6c 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -452,10 +452,7 @@ static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
/* get resource of ufs sys ctrl */
host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(host->ufs_sys_ctrl))
- return PTR_ERR(host->ufs_sys_ctrl);
-
- return 0;
+ return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
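PTR_ERR_OR_ZERO() comes from <linux/err.h> and folds the usual IS_ERR()/PTR_ERR() pair into a single expression, which is what lets the hunk above collapse three lines into one. Its behavior is equivalent to this sketch:

static inline int example_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* negative errno encoded in ptr */

	return 0;			/* valid pointer */
}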
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0f6ff33ce52e..83e28edc3ac5 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -147,6 +147,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
if (err)
goto out_variant_clear;
+ /* Enable runtime autosuspend */
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index a5b71487a206..c69c29a1ceb9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -246,6 +246,44 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
mb();
}
+/**
+ * ufs_qcom_host_reset - reset host controller and PHY
+ */
+static int ufs_qcom_host_reset(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!host->core_reset) {
+ dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ goto out;
+ }
+
+ ret = reset_control_assert(host->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(host->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+ usleep_range(1000, 1100);
+
+out:
+ return ret;
+}
+
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -254,6 +292,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ /* Reset UFS Host Controller and PHY */
+ ret = ufs_qcom_host_reset(hba);
+ if (ret)
+ dev_warn(hba->dev, "%s: host reset returned %d\n",
+ __func__, ret);
+
if (is_rate_B)
phy_set_mode(phy, PHY_MODE_UFS_HS_B);
@@ -1101,6 +1145,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Setup the reset control of HCI */
+ host->core_reset = devm_reset_control_get(hba->dev, "rst");
+ if (IS_ERR(host->core_reset)) {
+ err = PTR_ERR(host->core_reset);
+ dev_warn(dev, "Failed to get reset control %d\n", err);
+ host->core_reset = NULL;
+ err = 0;
+ }
+
/* Fire up the reset controller. Failure here is non-fatal. */
host->rcdev.of_node = dev->of_node;
host->rcdev.ops = &ufs_qcom_reset_ops;
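ufs_qcom_host_reset() above is an instance of the common pulse-reset idiom: assert, wait out the hardware's minimum assert width, deassert, then let the block settle. A generic sketch of the idiom under the same assumption that the reset line may legitimately be absent (names hypothetical):

static int example_pulse_reset(struct reset_control *rst,
			       unsigned int assert_us,
			       unsigned int settle_us)
{
	int ret;

	if (!rst)
		return 0;	/* reset control is optional on some boards */

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	usleep_range(assert_us, assert_us + 10);

	ret = reset_control_deassert(rst);
	if (ret)
		return ret;

	usleep_range(settle_us, settle_us + 100);

	return 0;
}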
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index d401f174bb70..2d95e7cc7187 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -6,6 +6,7 @@
#define UFS_QCOM_H_
#include <linux/reset-controller.h>
+#include <linux/reset.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
@@ -233,6 +234,8 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+ /* Reset control of HCI */
+ struct reset_control *core_reset;
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 969a36b15897..ad2abc96c0f1 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit == ahit)
- goto out_unlock;
- hba->ahit = ahit;
- if (!pm_runtime_suspended(hba->dev))
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+ if (hba->ahit != ahit)
+ hba->ahit = ahit;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!pm_runtime_suspended(hba->dev)) {
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ufshcd_auto_hibern8_enable(hba);
+ ufshcd_release(hba);
+ pm_runtime_put(hba->dev);
+ }
}
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
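The sysfs rework narrows the host-lock critical section to the cached hba->ahit update and programs the hardware outside it, inside a runtime-PM reference and a clock-gating hold, because REG_AUTO_HIBERNATE_IDLE_TIMER can only be written while the controller is resumed with clocks on. The hold/release pair generalizes to any such register access; a hedged sketch with a hypothetical helper name:

static void example_write_live_reg(struct ufs_hba *hba, u32 val, u32 reg)
{
	pm_runtime_get_sync(hba->dev);	/* ensure the device is resumed */
	ufshcd_hold(hba, false);	/* false: sleep until clocks are on */

	ufshcd_writel(hba, val, reg);

	ufshcd_release(hba);
	pm_runtime_put(hba->dev);
}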
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index dc2f6d2b46ed..baeecee35d1e 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -162,6 +162,7 @@ out:
/**
* ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
*
* Should be called when unloading the driver.
*/
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index fb9e2ff4f8d2..6a901da2d15a 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -80,7 +80,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
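Marking the const attribute table static changes where it lives: a plain const array local to a function is rebuilt on the stack on every call, while static const data is emitted once into .rodata. A standalone illustration:

void example(void)
{
	const int on_stack[] = { 1, 2, 3 };		/* copied at each call */
	static const int in_rodata[] = { 1, 2, 3 };	/* materialized once */

	(void)on_stack;
	(void)in_rodata;
}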
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8d40dc918f4e..76f9be71c31b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -402,7 +402,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "IRQ resource not available\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 11a87f51c442..b5966faf3e98 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -88,6 +88,9 @@
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
+/* default delay of autosuspend: 2000 ms */
+#define RPM_AUTOSUSPEND_DELAY_MS 2000
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -114,7 +117,7 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
return -EINVAL;
- regs = kzalloc(len, GFP_KERNEL);
+ regs = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
@@ -237,7 +240,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
END_FIX
};
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -1607,7 +1610,7 @@ static void ufshcd_gate_work(struct work_struct *work)
* state to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state == REQ_CLKS_ON)) {
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
@@ -1935,8 +1938,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
} else {
dev_warn(hba->dev,
- "%s: Response size is bigger than buffer",
- __func__);
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, buf_len);
return -EINVAL;
}
}
@@ -2986,10 +2989,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -3856,6 +3859,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
ret = ufshcd_host_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3885,15 +3891,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
+ int err;
+
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+ * If link recovery fails then return error code returned from
+ * ufshcd_link_recovery().
+ * If link recovery succeeds then return -EAGAIN to attempt
+ * hibern8 enter retry again.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@@ -3907,7 +3922,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret || ret == -ENOLINK)
+ if (!ret)
goto out;
}
out:
@@ -3941,7 +3956,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
@@ -4631,9 +4646,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
+ struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+
+ if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ sdev->rpm_autosuspend = 1;
+
return 0;
}
@@ -4788,19 +4808,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ }
+ return retval;
}
/**
@@ -4856,8 +4886,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -4876,7 +4910,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
@@ -5395,61 +5434,77 @@ out:
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
/* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
@@ -5472,10 +5527,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
@@ -5484,7 +5544,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -5532,6 +5592,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
}
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -5539,44 +5600,62 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
if (hba->errors)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -5584,8 +5663,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
@@ -5608,14 +5688,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
} while (intr_status && --retries);
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+ }
+
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5760,9 +5844,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* @hba: per-adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply
- * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
* @desc_buff: pointer to descriptor buffer, NULL if NA
* @buff_len: descriptor size, 0 if NA
+ * @cmd_type: specifies the type (NOP, Query...)
* @desc_op: descriptor operation
*
* Those type of requests uses UTP Transfer Request Descriptor - utrd.
@@ -5776,7 +5860,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
u8 *desc_buff, int *buff_len,
- int cmd_type,
+ enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
struct ufshcd_lrb *lrbp;
@@ -5856,7 +5940,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memcpy(desc_buff, descp, resp_len);
*buff_len = resp_len;
} else {
- dev_warn(hba->dev, "rsp size is bigger than buffer");
+ dev_warn(hba->dev,
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, *buff_len);
*buff_len = 0;
err = -EINVAL;
}
@@ -5891,7 +5977,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
int err;
- int cmd_type = DEV_CMD_TYPE_QUERY;
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { { 0 }, };
int ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
@@ -6770,23 +6856,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
&hba->desc_size.hlth_desc);
if (err)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
-{
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6881,9 +6957,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
@@ -6934,6 +7007,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7069,6 +7145,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
.dma_boundary = PAGE_SIZE - 1,
+ .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -7950,12 +8027,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
goto out;
set_old_link_state:
@@ -8274,9 +8351,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- /* Set descriptor lengths to specification defaults */
- ufshcd_def_desc_sizes(hba);
-
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c94cfda52829..2740f6941ec6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -716,6 +716,12 @@ struct ufs_hba {
* the performance of ongoing read/write operations.
*/
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
+ /*
+ * This capability allows the host controller driver to enable runtime
+ * power management automatically, instead of waiting for user space to
+ * control it.
+ */
+#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -749,6 +755,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
+static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
+}
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
@@ -916,6 +926,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
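The new UFSHCD_CAP_RPM_AUTOSUSPEND bit is consumed via ufshcd_is_rpm_autosuspend_allowed() when the SCSI device is set up. A minimal sketch of how a host driver could opt in, assuming a hypothetical vendor setup hook (only the capability flag itself comes from this patch):

	/* Hypothetical vendor hook; only UFSHCD_CAP_RPM_AUTOSUSPEND is real */
	static void example_vendor_setup(struct ufs_hba *hba)
	{
		/* Allow the SCSI layer to runtime-autosuspend the UFS device */
		hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
	}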
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index dbb75cd28dc8..c2961d37cc1c 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -195,7 +195,7 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index ca8e3abeb2c7..a23a8e5794f5 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+ return dma_len > (1U << 16) ? (1U << 16) : dma_len;
+}
+
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+ u32 dma_len)
+{
+ /* The old driver used 0xfffc as the limit, so do the same here */
+ return dma_len > 0xfffc ? 0xfffc : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
.irq_pending = fastlane_esp_irq_pending,
- .dma_length_limit = zorro_esp_dma_length_limit,
+ .dma_length_limit = fastlane_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
.dma_drain = zorro_esp_dma_drain,
.dma_invalidate = fastlane_esp_dma_invalidate,
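The generic Zorro limit is thus raised from 0xFFFF to 0x10000 (a full 64 KiB), while the Fastlane keeps its historical 0xfffc cap. For illustration only, the same clamp could be written with min_t(); this is a sketch, not a proposed change:

	static u32 fastlane_esp_dma_length_limit_alt(struct esp *esp, u32 dma_addr,
						     u32 dma_len)
	{
		return min_t(u32, dma_len, 0xfffc);
	}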
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 6d0d04f163cb..01fc0d20a70d 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -40,6 +40,7 @@ static const struct meson_gx_soc_id {
{ "G12A", 0x28 },
{ "G12B", 0x29 },
{ "SM1", 0x2b },
+ { "A1", 0x2c },
};
static const struct meson_gx_package_id {
@@ -68,6 +69,8 @@ static const struct meson_gx_package_id {
{ "S922X", 0x29, 0x40, 0xf0 },
{ "A311D", 0x29, 0x10, 0xf0 },
{ "S905X3", 0x2b, 0x5, 0xf },
+ { "S905D3", 0x2b, 0xb0, 0xf0 },
+ { "A113L", 0x2c, 0x0, 0xf8 },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 48f7ac238861..f3d8d53ab84d 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -97,13 +97,13 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
return ret ? ret : copied;
}
-static unsigned int snoop_file_poll(struct file *file,
+static __poll_t snoop_file_poll(struct file *file,
struct poll_table_struct *pt)
{
struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
poll_wait(file, &chan->wq, pt);
- return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0;
+ return !kfifo_is_empty(&chan->fifo) ? EPOLLIN : 0;
}
static const struct file_operations snoop_fops = {
diff --git a/drivers/soc/atmel/Kconfig b/drivers/soc/atmel/Kconfig
index 05528139b023..50caf6db9c0e 100644
--- a/drivers/soc/atmel/Kconfig
+++ b/drivers/soc/atmel/Kconfig
@@ -5,3 +5,14 @@ config AT91_SOC_ID
default ARCH_AT91
help
Include support for the SoC bus on the Atmel ARM SoCs.
+
+config AT91_SOC_SFR
+ tristate "Special Function Registers support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+ This is a driver for the Special Function Registers available on
+ Atmel SAMA5Dx SoCs, providing access to specific aspects of the
+ integrated memory, bridge implementations, processor, etc.
+
+ This driver can also be built as a module. If so, the module
+ will be called sfr.
diff --git a/drivers/soc/atmel/Makefile b/drivers/soc/atmel/Makefile
index 7ca355d10553..d849a897cd77 100644
--- a/drivers/soc/atmel/Makefile
+++ b/drivers/soc/atmel/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_AT91_SOC_ID) += soc.o
+obj-$(CONFIG_AT91_SOC_SFR) += sfr.o
diff --git a/drivers/soc/atmel/sfr.c b/drivers/soc/atmel/sfr.c
new file mode 100644
index 000000000000..0525eef49d1a
--- /dev/null
+++ b/drivers/soc/atmel/sfr.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sfr.c - driver for special function registers
+ *
+ * Copyright (C) 2019 Bootlin.
+ *
+ */
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/random.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define SFR_SN0 0x4c
+#define SFR_SN_SIZE 8
+
+struct atmel_sfr_priv {
+ struct regmap *regmap;
+};
+
+static int atmel_sfr_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct atmel_sfr_priv *priv = context;
+
+ return regmap_bulk_read(priv->regmap, SFR_SN0 + offset,
+ buf, bytes / 4);
+}
+
+static struct nvmem_config atmel_sfr_nvmem_config = {
+ .name = "atmel-sfr",
+ .read_only = true,
+ .word_size = 4,
+ .stride = 4,
+ .size = SFR_SN_SIZE,
+ .reg_read = atmel_sfr_read,
+};
+
+static int atmel_sfr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct nvmem_device *nvmem;
+ struct atmel_sfr_priv *priv;
+ u8 sn[SFR_SN_SIZE];
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ atmel_sfr_nvmem_config.dev = dev;
+ atmel_sfr_nvmem_config.priv = priv;
+
+ nvmem = devm_nvmem_register(dev, &atmel_sfr_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "error registering nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ ret = atmel_sfr_read(priv, 0, sn, SFR_SN_SIZE);
+ if (ret == 0)
+ add_device_randomness(sn, SFR_SN_SIZE);
+
+ return ret;
+}
+
+static const struct of_device_id atmel_sfr_dt_ids[] = {
+ {
+ .compatible = "atmel,sama5d2-sfr",
+ }, {
+ .compatible = "atmel,sama5d4-sfr",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, atmel_sfr_dt_ids);
+
+static struct platform_driver atmel_sfr_driver = {
+ .probe = atmel_sfr_probe,
+ .driver = {
+ .name = "atmel-sfr",
+ .of_match_table = atmel_sfr_dt_ids,
+ },
+};
+module_platform_driver(atmel_sfr_driver);
+
+MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
+MODULE_DESCRIPTION("Atmel SFR SN driver for SAMA5D2/4 SoC family");
+MODULE_LICENSE("GPL v2");
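Since the serial number is exposed through a standard nvmem provider, a consumer can read it with the generic nvmem API. A hedged sketch (the lookup name and call site are assumptions; the provider itself is registered above as "atmel-sfr"):

	#include <linux/err.h>
	#include <linux/nvmem-consumer.h>

	/* Hypothetical consumer: read the 8-byte serial number */
	static int example_read_sn(struct device *dev)
	{
		struct nvmem_device *nvmem;
		u8 sn[8];
		int ret;

		nvmem = devm_nvmem_device_get(dev, "atmel-sfr");
		if (IS_ERR(nvmem))
			return PTR_ERR(nvmem);

		ret = nvmem_device_read(nvmem, 0, sizeof(sn), sn);
		return ret < 0 ? ret : 0;
	}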
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index f9ad8ad54a7d..4df32bc4c7a6 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -40,4 +40,14 @@ config DPAA2_CONSOLE
/dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
which can be used to dump the Management Complex and AIOP
firmware logs.
+
+config FSL_RCPM
+ bool "Freescale RCPM support"
+ depends on PM_SLEEP && (ARM || ARM64)
+ help
+ The NXP QorIQ processors based on ARM cores have an RCPM module
+ (Run Control and Power Management), which performs all device-level
+ tasks associated with power management, such as wakeup source control.
+ Note that this driver currently does not support PowerPC-based
+ QorIQ processors.
endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 71dee8d0d1f0..906f1cd8af01 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_RCPM) += rcpm.o
obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/
obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
new file mode 100644
index 000000000000..a093dbe6d2cb
--- /dev/null
+++ b/drivers/soc/fsl/rcpm.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rcpm.c - Freescale QorIQ RCPM driver
+//
+// Copyright 2019 NXP
+//
+// Author: Ran Wang <ran.wang_1@nxp.com>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/kernel.h>
+
+#define RCPM_WAKEUP_CELL_MAX_SIZE 7
+
+struct rcpm {
+ unsigned int wakeup_cells;
+ void __iomem *ippdexpcr_base;
+ bool little_endian;
+};
+
+/**
+ * rcpm_pm_prepare - performs device-level tasks associated with power
+ * management, such as programming the wakeup source control registers.
+ * @dev: Device to handle.
+ *
+ */
+static int rcpm_pm_prepare(struct device *dev)
+{
+ int i, ret, idx;
+ void __iomem *base;
+ struct wakeup_source *ws;
+ struct rcpm *rcpm;
+ struct device_node *np = dev->of_node;
+ u32 value[RCPM_WAKEUP_CELL_MAX_SIZE + 1];
+ u32 setting[RCPM_WAKEUP_CELL_MAX_SIZE] = {0};
+
+ rcpm = dev_get_drvdata(dev);
+ if (!rcpm)
+ return -EINVAL;
+
+ base = rcpm->ippdexpcr_base;
+ idx = wakeup_sources_read_lock();
+
+ /* Begin with first registered wakeup source */
+ for_each_wakeup_source(ws) {
+
+ /* Skip wakeup sources that are not attached to a device */
+ if (!ws->dev || !ws->dev->parent)
+ continue;
+
+ ret = device_property_read_u32_array(ws->dev->parent,
+ "fsl,rcpm-wakeup", value,
+ rcpm->wakeup_cells + 1);
+
+ /* The wakeup source should refer to the current rcpm device */
+ if (ret || (np->phandle != value[0]))
+ continue;
+
+ /* The "#fsl,rcpm-wakeup-cells" property of the rcpm node defines
+ * the number of IPPDEXPCR register cells, and the "fsl,rcpm-wakeup"
+ * property of a wakeup source IP contains an integer array: <phandle
+ * to RCPM node, IPPDEXPCR0 setting, IPPDEXPCR1 setting,
+ * IPPDEXPCR2 setting, etc>.
+ *
+ * So we go through them to collect the setting data.
+ */
+ for (i = 0; i < rcpm->wakeup_cells; i++)
+ setting[i] |= value[i + 1];
+ }
+
+ wakeup_sources_read_unlock(idx);
+
+ /* Program all IPPDEXPCRn once */
+ for (i = 0; i < rcpm->wakeup_cells; i++) {
+ u32 tmp = setting[i];
+ void __iomem *address = base + i * 4;
+
+ if (!tmp)
+ continue;
+
+ /* We can only OR related bits */
+ if (rcpm->little_endian) {
+ tmp |= ioread32(address);
+ iowrite32(tmp, address);
+ } else {
+ tmp |= ioread32be(address);
+ iowrite32be(tmp, address);
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops rcpm_pm_ops = {
+ .prepare = rcpm_pm_prepare,
+};
+
+static int rcpm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct rcpm *rcpm;
+ int ret;
+
+ rcpm = devm_kzalloc(dev, sizeof(*rcpm), GFP_KERNEL);
+ if (!rcpm)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ rcpm->ippdexpcr_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(rcpm->ippdexpcr_base)) {
+ ret = PTR_ERR(rcpm->ippdexpcr_base);
+ return ret;
+ }
+
+ rcpm->little_endian = device_property_read_bool(
+ &pdev->dev, "little-endian");
+
+ ret = device_property_read_u32(&pdev->dev,
+ "#fsl,rcpm-wakeup-cells", &rcpm->wakeup_cells);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&pdev->dev, rcpm);
+
+ return 0;
+}
+
+static const struct of_device_id rcpm_of_match[] = {
+ { .compatible = "fsl,qoriq-rcpm-2.1+", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rcpm_of_match);
+
+static struct platform_driver rcpm_driver = {
+ .driver = {
+ .name = "rcpm",
+ .of_match_table = rcpm_of_match,
+ .pm = &rcpm_pm_ops,
+ },
+ .probe = rcpm_probe,
+};
+
+module_platform_driver(rcpm_driver);
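For orientation, the wakeup-source property layout that rcpm_pm_prepare() parses looks like this (the values shown are invented; the real cell count comes from "#fsl,rcpm-wakeup-cells"):

	/*
	 * fsl,rcpm-wakeup = <&rcpm 0x00002000 0x0>;
	 *                     |       |        |
	 *                     |       |        +-- bits to OR into IPPDEXPCR1
	 *                     |       +----------- bits to OR into IPPDEXPCR0
	 *                     +------------------- phandle to the RCPM node
	 */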
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
index c68882eb80f7..fb70b8a3f7c5 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -33,12 +33,10 @@ struct imx_sc_msg_misc_get_soc_uid {
u32 uid_high;
} __packed;
-static ssize_t soc_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int imx_scu_soc_uid(u64 *soc_uid)
{
struct imx_sc_msg_misc_get_soc_uid msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
- u64 soc_uid;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
@@ -52,15 +50,13 @@ static ssize_t soc_uid_show(struct device *dev,
return ret;
}
- soc_uid = msg.uid_high;
- soc_uid <<= 32;
- soc_uid |= msg.uid_low;
+ *soc_uid = msg.uid_high;
+ *soc_uid <<= 32;
+ *soc_uid |= msg.uid_low;
- return sprintf(buf, "%016llX\n", soc_uid);
+ return 0;
}
-static DEVICE_ATTR_RO(soc_uid);
-
static int imx_scu_soc_id(void)
{
struct imx_sc_msg_misc_get_soc_id msg;
@@ -89,6 +85,7 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
int id, ret;
+ u64 uid = 0;
u32 val;
ret = imx_scu_get_handle(&soc_ipc_handle);
@@ -112,6 +109,10 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
if (id < 0)
return -EINVAL;
+ ret = imx_scu_soc_uid(&uid);
+ if (ret < 0)
+ return -EINVAL;
+
/* format soc_id value passed from SCU firmware */
val = id & 0x1f;
soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
@@ -130,19 +131,22 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
goto free_soc_id;
}
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_revision;
+ }
+
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
ret = PTR_ERR(soc_dev);
- goto free_revision;
+ goto free_serial_number;
}
- ret = device_create_file(soc_device_to_device(soc_dev),
- &dev_attr_soc_uid);
- if (ret)
- goto free_revision;
-
return 0;
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
free_revision:
kfree(soc_dev_attr->revision);
free_soc_id:
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index b9831576dd25..d84ed736cdb0 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
#include <linux/of.h>
#define REV_B1 0x21
@@ -16,6 +17,8 @@
#define IMX8MQ_SW_INFO_B1 0x40
#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420
@@ -29,13 +32,21 @@ struct imx8_soc_data {
static u64 soc_uid;
-static ssize_t soc_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+#ifdef CONFIG_HAVE_ARM_SMCCC
+static u32 imx8mq_soc_revision_from_atf(void)
{
- return sprintf(buf, "%016llX\n", soc_uid);
-}
+ struct arm_smccc_res res;
-static DEVICE_ATTR_RO(soc_uid);
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return 0;
+ else
+ return res.a0 & 0xff;
+}
+#else
+static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; }
+#endif
static u32 __init imx8mq_soc_revision(void)
{
@@ -51,9 +62,16 @@ static u32 __init imx8mq_soc_revision(void)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
- magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
- if (magic == IMX8MQ_SW_MAGIC_B1)
- rev = REV_B1;
+ /*
+ * The SoC revision on older i.MX8MQ parts is not available in fuses,
+ * so query the value from ATF instead.
+ */
+ rev = imx8mq_soc_revision_from_atf();
+ if (!rev) {
+ magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
+ if (magic == IMX8MQ_SW_MAGIC_B1)
+ rev = REV_B1;
+ }
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
soc_uid <<= 32;
@@ -174,22 +192,25 @@ static int __init imx8_soc_init(void)
goto free_soc;
}
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_rev;
+ }
+
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
ret = PTR_ERR(soc_dev);
- goto free_rev;
+ goto free_serial_number;
}
- ret = device_create_file(soc_device_to_device(soc_dev),
- &dev_attr_soc_uid);
- if (ret)
- goto free_rev;
-
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
return 0;
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
free_rev:
if (strcmp(soc_dev_attr->revision, "unknown"))
kfree(soc_dev_attr->revision);
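With serial_number filled in, the soc bus core publishes the UID as a standard sysfs attribute instead of the driver-private soc_uid file. A user-space sketch, assuming the device registers as soc0:

	#include <stdio.h>

	int main(void)
	{
		char sn[64] = "";
		FILE *f = fopen("/sys/devices/soc0/serial_number", "r");

		if (f && fgets(sn, sizeof(sn), f))
			printf("SoC serial number: %s", sn);
		if (f)
			fclose(f);
		return 0;
	}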
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 7aa0517ff2f3..3c82de5f9417 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -155,7 +155,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
offset_mask |= CMDQ_WRITE_ENABLE_MASK;
}
- err |= cmdq_pkt_write(pkt, value, subsys, offset_mask);
+ err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
return err;
}
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 503222d0d0da..f669d3754627 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -21,7 +21,7 @@
#include <dt-bindings/power/mt8173-power.h>
#define MTK_POLL_DELAY_US 10
-#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+#define MTK_POLL_TIMEOUT USEC_PER_SEC
#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
#define MTK_SCPD_FWAIT_SRAM BIT(1)
@@ -108,6 +108,17 @@ static const char * const clk_names[] = {
#define MAX_CLKS 3
+/**
+ * struct scp_domain_data - scp domain data for power on/off flow
+ * @name: The domain name.
+ * @sta_mask: The mask for power on/off status bit.
+ * @ctl_offs: The offset for main power control register.
+ * @sram_pdn_bits: The mask for sram power control bits.
+ * @sram_pdn_ack_bits: The mask for sram power control acked bits.
+ * @bus_prot_mask: The mask for single step bus protection.
+ * @clk_id: The basic clocks required by this power domain.
+ * @caps: The flag for active wake-up action.
+ */
struct scp_domain_data {
const char *name;
u32 sta_mask;
@@ -180,32 +191,132 @@ static int scpsys_domain_is_on(struct scp_domain *scpd)
return -EINVAL;
}
+static int scpsys_regulator_enable(struct scp_domain *scpd)
+{
+ if (!scpd->supply)
+ return 0;
+
+ return regulator_enable(scpd->supply);
+}
+
+static int scpsys_regulator_disable(struct scp_domain *scpd)
+{
+ if (!scpd->supply)
+ return 0;
+
+ return regulator_disable(scpd->supply);
+}
+
+static void scpsys_clk_disable(struct clk *clk[], int max_num)
+{
+ int i;
+
+ for (i = max_num - 1; i >= 0; i--)
+ clk_disable_unprepare(clk[i]);
+}
+
+static int scpsys_clk_enable(struct clk *clk[], int max_num)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < max_num && clk[i]; i++) {
+ ret = clk_prepare_enable(clk[i]);
+ if (ret) {
+ scpsys_clk_disable(clk, i);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int scpsys_sram_enable(struct scp_domain *scpd, void __iomem *ctl_addr)
+{
+ u32 val;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+ int tmp;
+
+ val = readl(ctl_addr);
+ val &= ~scpd->data->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
+ if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
+ /*
+ * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
+ * MT7622_POWER_DOMAIN_WB and thus just a trivial setup
+ * is applied here.
+ */
+ usleep_range(12000, 12100);
+ } else {
+ /* Wait until the SRAM_PDN_ACK bits are all 0 */
+ int ret = readl_poll_timeout(ctl_addr, tmp,
+ (tmp & pdn_ack) == 0,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_sram_disable(struct scp_domain *scpd, void __iomem *ctl_addr)
+{
+ u32 val;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+ int tmp;
+
+ val = readl(ctl_addr);
+ val |= scpd->data->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* Wait until the SRAM_PDN_ACK bits are all 1 */
+ return readl_poll_timeout(ctl_addr, tmp,
+ (tmp & pdn_ack) == pdn_ack,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+}
+
+static int scpsys_bus_protect_enable(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ if (!scpd->data->bus_prot_mask)
+ return 0;
+
+ return mtk_infracfg_set_bus_protection(scp->infracfg,
+ scpd->data->bus_prot_mask,
+ scp->bus_prot_reg_update);
+}
+
+static int scpsys_bus_protect_disable(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ if (!scpd->data->bus_prot_mask)
+ return 0;
+
+ return mtk_infracfg_clear_bus_protection(scp->infracfg,
+ scpd->data->bus_prot_mask,
+ scp->bus_prot_reg_update);
+}
+
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
struct scp *scp = scpd->scp;
void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
- u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
u32 val;
int ret, tmp;
- int i;
- if (scpd->supply) {
- ret = regulator_enable(scpd->supply);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++) {
- ret = clk_prepare_enable(scpd->clk[i]);
- if (ret) {
- for (--i; i >= 0; i--)
- clk_disable_unprepare(scpd->clk[i]);
+ ret = scpsys_regulator_enable(scpd);
+ if (ret < 0)
+ return ret;
- goto err_clk;
- }
- }
+ ret = scpsys_clk_enable(scpd->clk, MAX_CLKS);
+ if (ret)
+ goto err_clk;
+ /* subsys power on */
val = readl(ctl_addr);
val |= PWR_ON_BIT;
writel(val, ctl_addr);
@@ -227,43 +338,20 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
val |= PWR_RST_B_BIT;
writel(val, ctl_addr);
- val &= ~scpd->data->sram_pdn_bits;
- writel(val, ctl_addr);
-
- /* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
- if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
- /*
- * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
- * MT7622_POWER_DOMAIN_WB and thus just a trivial setup is
- * applied here.
- */
- usleep_range(12000, 12100);
-
- } else {
- ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == 0,
- MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
- if (ret < 0)
- goto err_pwr_ack;
- }
+ ret = scpsys_sram_enable(scpd, ctl_addr);
+ if (ret < 0)
+ goto err_pwr_ack;
- if (scpd->data->bus_prot_mask) {
- ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
- scpd->data->bus_prot_mask,
- scp->bus_prot_reg_update);
- if (ret)
- goto err_pwr_ack;
- }
+ ret = scpsys_bus_protect_disable(scpd);
+ if (ret < 0)
+ goto err_pwr_ack;
return 0;
err_pwr_ack:
- for (i = MAX_CLKS - 1; i >= 0; i--) {
- if (scpd->clk[i])
- clk_disable_unprepare(scpd->clk[i]);
- }
+ scpsys_clk_disable(scpd->clk, MAX_CLKS);
err_clk:
- if (scpd->supply)
- regulator_disable(scpd->supply);
+ scpsys_regulator_disable(scpd);
dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
@@ -275,29 +363,19 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
struct scp *scp = scpd->scp;
void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
- u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
u32 val;
int ret, tmp;
- int i;
-
- if (scpd->data->bus_prot_mask) {
- ret = mtk_infracfg_set_bus_protection(scp->infracfg,
- scpd->data->bus_prot_mask,
- scp->bus_prot_reg_update);
- if (ret)
- goto out;
- }
- val = readl(ctl_addr);
- val |= scpd->data->sram_pdn_bits;
- writel(val, ctl_addr);
+ ret = scpsys_bus_protect_enable(scpd);
+ if (ret < 0)
+ goto out;
- /* wait until SRAM_PDN_ACK all 1 */
- ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == pdn_ack,
- MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ ret = scpsys_sram_disable(scpd, ctl_addr);
if (ret < 0)
goto out;
+ /* subsys power off */
+ val = readl(ctl_addr);
val |= PWR_ISO_BIT;
writel(val, ctl_addr);
@@ -319,11 +397,11 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
if (ret < 0)
goto out;
- for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++)
- clk_disable_unprepare(scpd->clk[i]);
+ scpsys_clk_disable(scpd->clk, MAX_CLKS);
- if (scpd->supply)
- regulator_disable(scpd->supply);
+ ret = scpsys_regulator_disable(scpd);
+ if (ret < 0)
+ goto out;
return 0;
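After this refactor the power-on path reads as a linear sequence of the new helpers; a condensed, comments-only view of the flow in the patch:

	/*
	 * scpsys_power_on(), condensed:
	 *   scpsys_regulator_enable()    - supply up
	 *   scpsys_clk_enable()          - basic clocks on
	 *   set PWR_ON/PWR_ON_2ND, poll power status
	 *   clear PWR_CLK_DIS and PWR_ISO, set PWR_RST_B
	 *   scpsys_sram_enable()         - SRAM out of power-down
	 *   scpsys_bus_protect_disable() - open the bus
	 *
	 * scpsys_power_off() runs the mirrored sequence in reverse.
	 */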
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 661e47acc354..79d826553ac8 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -58,22 +58,24 @@ config QCOM_LLCC
depends on ARCH_QCOM || COMPILE_TEST
help
Qualcomm Technologies, Inc. platform specific
- Last Level Cache Controller(LLCC) driver. This provides interfaces
- to clients that use the LLCC. Say yes here to enable LLCC slice
- driver.
-
-config QCOM_SDM845_LLCC
- tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
- depends on QCOM_LLCC
- help
- Say yes here to enable the LLCC driver for SDM845. This provides
- data required to configure LLCC so that clients can start using the
- LLCC slices.
+ Last Level Cache Controller (LLCC) driver for platforms such as
+ SDM845. This provides interfaces to clients that use the LLCC.
+ Say yes here to enable the LLCC slice driver.
config QCOM_MDT_LOADER
tristate
select QCOM_SCM
+config QCOM_OCMEM
+ tristate "Qualcomm On Chip Memory (OCMEM) driver"
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ The On Chip Memory (OCMEM) allocator allows various clients to
+ allocate memory from OCMEM based on performance, latency and power
+ requirements. This is typically used by the GPU, camera/video, and
+ audio components on some Snapdragon SoCs.
+
config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 162788701a77..9fb35c8a495e 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
@@ -21,7 +22,6 @@ obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
-obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
-obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-qcom.c
index 9090ea12eaf3..429b5a60a1ba 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
@@ -46,15 +47,90 @@
#define BANK_OFFSET_STRIDE 0x80000
-static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
+/**
+ * llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: Unique id for the client's use case
+ * @slice_id: llcc slice id for each client
+ * @max_cap: The maximum capacity of the cache slice provided in KB
+ * @priority: Priority of the client used to select victim line for replacement
+ * @fixed_size: Boolean indicating if the slice has a fixed capacity
+ * @bonus_ways: Bonus ways are additional ways to be used for any slice,
+ * if a client ends up using more than its reserved cache ways. Bonus
+ * ways are allocated only if they are not reserved for some
+ * other client.
+ * @res_ways: Reserved ways for the cache slice; the reserved ways cannot
+ * be used by any client other than the one they are assigned to.
+ * @cache_mode: Each slice operates as a cache; this controls the mode of the
+ * slice: normal or TCM (Tightly Coupled Memory)
+ * @probe_target_ways: Determines what ways to probe for access hit. When
+ * configured to 1 only bonus and reserved ways are probed.
+ * When configured to 0 all ways in llcc are probed.
+ * @dis_cap_alloc: Disable capacity based allocation for a client
+ * @retain_on_pc: If this bit is set and the client has maintained an active
+ * vote, then the ways assigned to this client are not flushed on power
+ * collapse.
+ * @activate_on_init: Activate the slice immediately after it is programmed
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+};
+
+struct qcom_llcc_config {
+ const struct llcc_slice_config *sct_data;
+ int size;
+};
+
+static const struct llcc_slice_config sc7180_data[] = {
+ { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sdm845_data[] = {
+ { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
+ { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
+ { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+};
+
+static const struct qcom_llcc_config sc7180_cfg = {
+ .sct_data = sc7180_data,
+ .size = ARRAY_SIZE(sc7180_data),
+};
-static const struct regmap_config llcc_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .fast_io = true,
+static const struct qcom_llcc_config sdm845_cfg = {
+ .sct_data = sdm845_data,
+ .size = ARRAY_SIZE(sdm845_data),
};
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
+
/**
* llcc_slice_getd - get llcc slice descriptor
* @uid: usecase_id for the client
@@ -301,19 +377,24 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
return ret;
}
-int qcom_llcc_remove(struct platform_device *pdev)
+static int qcom_llcc_remove(struct platform_device *pdev)
{
/* Set the global pointer to an error code to avoid referencing it */
drv_data = ERR_PTR(-ENODEV);
return 0;
}
-EXPORT_SYMBOL_GPL(qcom_llcc_remove);
static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
const char *name)
{
struct resource *res;
void __iomem *base;
+ struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ };
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (!res)
@@ -323,16 +404,19 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
if (IS_ERR(base))
return ERR_CAST(base);
+ llcc_regmap_config.name = name;
return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
}
-int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *llcc_cfg, u32 sz)
+static int qcom_llcc_probe(struct platform_device *pdev)
{
u32 num_banks;
struct device *dev = &pdev->dev;
int ret, i;
struct platform_device *llcc_edac;
+ const struct qcom_llcc_config *cfg;
+ const struct llcc_slice_config *llcc_cfg;
+ u32 sz;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
@@ -362,6 +446,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
num_banks >>= LLCC_LB_CNT_SHIFT;
drv_data->num_banks = num_banks;
+ cfg = of_device_get_match_data(&pdev->dev);
+ llcc_cfg = cfg->sct_data;
+ sz = cfg->size;
+
for (i = 0; i < sz; i++)
if (llcc_cfg[i].slice_id > drv_data->max_slices)
drv_data->max_slices = llcc_cfg[i].slice_id;
@@ -407,6 +495,22 @@ err:
drv_data = ERR_PTR(-ENODEV);
return ret;
}
-EXPORT_SYMBOL_GPL(qcom_llcc_probe);
-MODULE_LICENSE("GPL v2");
+
+static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
+ { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { }
+};
+
+static struct platform_driver qcom_llcc_driver = {
+ .driver = {
+ .name = "qcom-llcc",
+ .of_match_table = qcom_llcc_of_match,
+ },
+ .probe = qcom_llcc_probe,
+ .remove = qcom_llcc_remove,
+};
+module_platform_driver(qcom_llcc_driver);
+
MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller");
+MODULE_LICENSE("GPL v2");
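With the SCT tables and the driver merged into one file, supporting another SoC reduces to a slice table plus a match-table entry. A hypothetical sketch (names and values invented):

	static const struct llcc_slice_config newsoc_data[] = {
		{ LLCC_CPUSS, 1, 512, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
	};

	static const struct qcom_llcc_config newsoc_cfg = {
		.sct_data = newsoc_data,
		.size = ARRAY_SIZE(newsoc_data),
	};

	/* ...and one more line in qcom_llcc_of_match:
	 *	{ .compatible = "qcom,newsoc-llcc", .data = &newsoc_cfg },
	 */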
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
deleted file mode 100644
index 86600d97c36d..000000000000
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/soc/qcom/llcc-qcom.h>
-
-/*
- * SCT(System Cache Table) entry contains of the following members:
- * usecase_id: Unique id for the client's use case
- * slice_id: llcc slice id for each client
- * max_cap: The maximum capacity of the cache slice provided in KB
- * priority: Priority of the client used to select victim line for replacement
- * fixed_size: Boolean indicating if the slice has a fixed capacity
- * bonus_ways: Bonus ways are additional ways to be used for any slice,
- * if client ends up using more than reserved cache ways. Bonus
- * ways are allocated only if they are not reserved for some
- * other client.
- * res_ways: Reserved ways for the cache slice, the reserved ways cannot
- * be used by any other client than the one its assigned to.
- * cache_mode: Each slice operates as a cache, this controls the mode of the
- * slice: normal or TCM(Tightly Coupled Memory)
- * probe_target_ways: Determines what ways to probe for access hit. When
- * configured to 1 only bonus and reserved ways are probed.
- * When configured to 0 all ways in llcc are probed.
- * dis_cap_alloc: Disable capacity based allocation for a client
- * retain_on_pc: If this bit is set and client has maintained active vote
- * then the ways assigned to this client are not flushed on power
- * collapse.
- * activate_on_init: Activate the slice immediately after the SCT is programmed
- */
-#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
- { \
- .usecase_id = uid, \
- .slice_id = sid, \
- .max_cap = mc, \
- .priority = p, \
- .fixed_size = fs, \
- .bonus_ways = bway, \
- .res_ways = rway, \
- .cache_mode = cmod, \
- .probe_target_ways = ptw, \
- .dis_cap_alloc = dca, \
- .retain_on_pc = rp, \
- .activate_on_init = a, \
- }
-
-static struct llcc_slice_config sdm845_data[] = {
- SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
- SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
- SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
-};
-
-static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
-{
- return qcom_llcc_remove(pdev);
-}
-
-static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
-{
- return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
-}
-
-static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
- { .compatible = "qcom,sdm845-llcc", },
- { }
-};
-
-static struct platform_driver sdm845_qcom_llcc_driver = {
- .driver = {
- .name = "sdm845-llcc",
- .of_match_table = sdm845_qcom_llcc_of_match,
- },
- .probe = sdm845_qcom_llcc_probe,
- .remove = sdm845_qcom_llcc_remove,
-};
-module_platform_driver(sdm845_qcom_llcc_driver);
-
-MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
new file mode 100644
index 000000000000..7f9e9944d1ea
--- /dev/null
+++ b/drivers/soc/qcom/ocmem.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The On Chip Memory (OCMEM) allocator allows various clients to allocate
+ * memory from OCMEM based on performance, latency and power requirements.
+ * This is typically used by the GPU, camera/video, and audio components on
+ * some Snapdragon SoCs.
+ *
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/ocmem.h>
+
+enum region_mode {
+ WIDE_MODE = 0x0,
+ THIN_MODE,
+ MODE_DEFAULT = WIDE_MODE,
+};
+
+enum ocmem_macro_state {
+ PASSTHROUGH = 0,
+ PERI_ON = 1,
+ CORE_ON = 2,
+ CLK_OFF = 4,
+};
+
+struct ocmem_region {
+ bool interleaved;
+ enum region_mode mode;
+ unsigned int num_macros;
+ enum ocmem_macro_state macro_state[4];
+ unsigned long macro_size;
+ unsigned long region_size;
+};
+
+struct ocmem_config {
+ uint8_t num_regions;
+ unsigned long macro_size;
+};
+
+struct ocmem {
+ struct device *dev;
+ const struct ocmem_config *config;
+ struct resource *memory;
+ void __iomem *mmio;
+ unsigned int num_ports;
+ unsigned int num_macros;
+ bool interleaved;
+ struct ocmem_region *regions;
+ unsigned long active_allocations;
+};
+
+#define OCMEM_MIN_ALIGN SZ_64K
+#define OCMEM_MIN_ALLOC SZ_64K
+
+#define OCMEM_REG_HW_VERSION 0x00000000
+#define OCMEM_REG_HW_PROFILE 0x00000004
+
+#define OCMEM_REG_REGION_MODE_CTL 0x00001000
+#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001
+#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002
+#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004
+#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008
+
+#define OCMEM_REG_GFX_MPU_START 0x00001004
+#define OCMEM_REG_GFX_MPU_END 0x00001008
+
+#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_PREP(0x0000000f, (val))
+#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_PREP(0x00003f00, (val))
+
+#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000
+#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000
+#define OCMEM_REG_GEN_STATUS 0x0000000c
+
+#define OCMEM_REG_PSGSC_STATUS 0x00000038
+#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0))
+
+#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val))
+#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val))
+#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val))
+#define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val))
+
+#define OCMEM_CLK_CORE_IDX 0
+static struct clk_bulk_data ocmem_clks[] = {
+ {
+ .id = "core",
+ },
+ {
+ .id = "iface",
+ },
+};
+
+static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data)
+{
+ writel(data, ocmem->mmio + reg);
+}
+
+static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg)
+{
+ return readl(ocmem->mmio + reg);
+}
+
+static void update_ocmem(struct ocmem *ocmem)
+{
+ uint32_t region_mode_ctrl = 0x0;
+ int i;
+
+ if (!qcom_scm_ocmem_lock_available()) {
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (region->mode == THIN_MODE)
+ region_mode_ctrl |= BIT(i);
+ }
+
+ dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n",
+ region_mode_ctrl);
+ ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl);
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+ u32 data;
+
+ data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) |
+ OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) |
+ OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) |
+ OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]);
+
+ ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data);
+ }
+}
+
+static unsigned long phys_to_offset(struct ocmem *ocmem,
+ unsigned long addr)
+{
+ if (addr < ocmem->memory->start || addr >= ocmem->memory->end)
+ return 0;
+
+ return addr - ocmem->memory->start;
+}
+
+static unsigned long device_address(struct ocmem *ocmem,
+ enum ocmem_client client,
+ unsigned long addr)
+{
+ WARN_ON(client != OCMEM_GRAPHICS);
+
+ /* TODO: gpu uses phys_to_offset, but others do not... */
+ return phys_to_offset(ocmem, addr);
+}
+
+static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
+ enum ocmem_macro_state mstate, enum region_mode rmode)
+{
+ unsigned long offset = 0;
+ int i, j;
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (buf->offset <= offset && offset < buf->offset + buf->len)
+ region->mode = rmode;
+
+ for (j = 0; j < region->num_macros; j++) {
+ if (buf->offset <= offset &&
+ offset < buf->offset + buf->len)
+ region->macro_state[j] = mstate;
+
+ offset += region->macro_size;
+ }
+ }
+
+ update_ocmem(ocmem);
+}
+
+struct ocmem *of_get_ocmem(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *devnode;
+
+ devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ if (!devnode || !devnode->parent) {
+ dev_err(dev, "Cannot look up sram phandle\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(devnode->parent);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL(of_get_ocmem);
+
+struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
+ unsigned long size)
+{
+ struct ocmem_buf *buf;
+ int ret;
+
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return ERR_PTR(-ENODEV);
+
+ if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN))
+ return ERR_PTR(-EINVAL);
+
+ if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
+ return ERR_PTR(-EBUSY);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ buf->offset = 0;
+ buf->addr = device_address(ocmem, client, buf->offset);
+ buf->len = size;
+
+ update_range(ocmem, buf, CORE_ON, WIDE_MODE);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len, WIDE_MODE);
+ if (ret) {
+ dev_err(ocmem->dev, "could not lock: %d\n", ret);
+ ret = -EINVAL;
+ goto err_kfree;
+ }
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END,
+ buf->offset + buf->len);
+ }
+
+ dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
+ size / 1024, buf->addr, client);
+
+ return buf;
+
+err_kfree:
+ kfree(buf);
+err_unlock:
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ocmem_allocate);
+
+void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+ struct ocmem_buf *buf)
+{
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return;
+
+ update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ int ret;
+
+ ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len);
+ if (ret)
+ dev_err(ocmem->dev, "could not unlock: %d\n", ret);
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0);
+ }
+
+ kfree(buf);
+
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+}
+EXPORT_SYMBOL(ocmem_free);
+
+static int ocmem_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long reg, region_size;
+ int i, j, ret, num_banks;
+ struct resource *res;
+ struct ocmem *ocmem;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL);
+ if (!ocmem)
+ return -ENOMEM;
+
+ ocmem->dev = dev;
+ ocmem->config = device_get_match_data(dev);
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Unable to get clocks\n");
+
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ ocmem->mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ocmem->mmio)) {
+ dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n");
+ return PTR_ERR(ocmem->mmio);
+ }
+
+ ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem");
+ if (!ocmem->memory) {
+ dev_err(dev, "Could not get mem region\n");
+ return -ENXIO;
+ }
+
+ /* The core clock is synchronous with graphics */
+ WARN_ON(clk_set_rate(ocmem_clks[OCMEM_CLK_CORE_IDX].clk, 1000) < 0);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ if (ret) {
+ dev_info(ocmem->dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ if (qcom_scm_restore_sec_cfg_available()) {
+ dev_dbg(dev, "configuring scm\n");
+ ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0);
+ if (ret) {
+ dev_err(dev, "Could not enable secure configuration\n");
+ goto err_clk_disable;
+ }
+ }
+
+ reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
+ ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
+ ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
+ ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING);
+
+ num_banks = ocmem->num_ports / 2;
+ region_size = ocmem->config->macro_size * num_banks;
+
+ dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n",
+ ocmem->num_ports, ocmem->config->num_regions,
+ ocmem->num_macros, ocmem->interleaved ? "" : "not ");
+
+ ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions,
+ sizeof(struct ocmem_region), GFP_KERNEL);
+ if (!ocmem->regions) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ region->mode = MODE_DEFAULT;
+ region->num_macros = num_banks;
+
+ if (i == (ocmem->config->num_regions - 1) &&
+ reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) {
+ region->macro_size = ocmem->config->macro_size / 2;
+ region->region_size = region_size / 2;
+ } else {
+ region->macro_size = ocmem->config->macro_size;
+ region->region_size = region_size;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(region->macro_state); j++)
+ region->macro_state[j] = CLK_OFF;
+ }
+
+ platform_set_drvdata(pdev, ocmem);
+
+ return 0;
+
+err_clk_disable:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ return ret;
+}
+
+static int ocmem_dev_remove(struct platform_device *pdev)
+{
+ clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+
+ return 0;
+}
+
+static const struct ocmem_config ocmem_8974_config = {
+ .num_regions = 3,
+ .macro_size = SZ_128K,
+};
+
+static const struct of_device_id ocmem_of_match[] = {
+ { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ocmem_of_match);
+
+static struct platform_driver ocmem_driver = {
+ .probe = ocmem_dev_probe,
+ .remove = ocmem_dev_remove,
+ .driver = {
+ .name = "ocmem",
+ .of_match_table = ocmem_of_match,
+ },
+};
+
+module_platform_driver(ocmem_driver);
+
+MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
+MODULE_LICENSE("GPL v2");
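A client is expected to use the exported trio of_get_ocmem() / ocmem_allocate() / ocmem_free(). An illustrative sketch with error handling trimmed (the device pointer and size are assumptions; as the code notes, only OCMEM_GRAPHICS is accepted for now):

	#include <linux/err.h>
	#include <linux/sizes.h>
	#include <soc/qcom/ocmem.h>

	static int example_use_ocmem(struct device *dev)
	{
		struct ocmem *ocmem;
		struct ocmem_buf *buf;

		ocmem = of_get_ocmem(dev);
		if (IS_ERR(ocmem))
			return PTR_ERR(ocmem);

		/* Size must be a multiple of OCMEM_MIN_ALIGN (64K) */
		buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_1M);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		/* ... program the hardware with buf->addr / buf->len ... */

		ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
		return 0;
	}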
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 33a27e6c6d67..006ac40c526a 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -44,7 +44,7 @@
#define QMP_NUM_COOLING_RESOURCES 2
-static bool qmp_cdev_init_state = 1;
+static bool qmp_cdev_max_state = 1;
struct qmp_cooling_device {
struct thermal_cooling_device *cdev;
@@ -402,7 +402,7 @@ static void qmp_pd_remove(struct qmp *qmp)
static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = qmp_cdev_init_state;
+ *state = qmp_cdev_max_state;
return 0;
}
@@ -432,7 +432,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
snprintf(buf, sizeof(buf),
"{class: volt_flr, event:zero_temp, res:%s, value:%s}",
qmp_cdev->name,
- cdev_state ? "off" : "on");
+ cdev_state ? "on" : "off");
ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));
@@ -455,7 +455,7 @@ static int qmp_cooling_device_add(struct qmp *qmp,
char *cdev_name = (char *)node->name;
qmp_cdev->qmp = qmp;
- qmp_cdev->state = qmp_cdev_init_state;
+ qmp_cdev->state = !qmp_cdev_max_state;
qmp_cdev->name = cdev_name;
qmp_cdev->cdev = devm_thermal_of_cooling_device_register
(qmp->dev, node,
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 3c1a55cf25d6..2b1834c5609a 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -115,6 +115,28 @@ struct rpmpd_desc {
static DEFINE_MUTEX(rpmpd_lock);
+/* msm8976 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
+DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
+
+DEFINE_RPMPD_VFL(msm8976, vddcx_vfl, RWSC, 2);
+DEFINE_RPMPD_VFL(msm8976, vddmx_vfl, RWSM, 6);
+
+static struct rpmpd *msm8976_rpmpds[] = {
+ [MSM8976_VDDCX] = &msm8976_vddcx,
+ [MSM8976_VDDCX_AO] = &msm8976_vddcx_ao,
+ [MSM8976_VDDCX_VFL] = &msm8976_vddcx_vfl,
+ [MSM8976_VDDMX] = &msm8976_vddmx,
+ [MSM8976_VDDMX_AO] = &msm8976_vddmx_ao,
+ [MSM8976_VDDMX_VFL] = &msm8976_vddmx_vfl,
+};
+
+static const struct rpmpd_desc msm8976_desc = {
+ .rpmpds = msm8976_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8976_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_HIGH,
+};
+
/* msm8996 RPM Power domains */
DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
@@ -198,6 +220,7 @@ static const struct rpmpd_desc qcs404_desc = {
};
static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index fa9dd12b5e39..005dd30c58fa 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -19,12 +19,14 @@
/**
* struct qcom_smd_rpm - state of the rpm device driver
* @rpm_channel: reference to the smd channel
+ * @icc: interconnect proxy device
* @ack: completion for acks
* @lock: mutual exclusion around the send/complete pair
* @ack_status: result of the rpm request
*/
struct qcom_smd_rpm {
struct rpmsg_endpoint *rpm_channel;
+ struct platform_device *icc;
struct device *dev;
struct completion ack;
@@ -193,6 +195,7 @@ static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
{
struct qcom_smd_rpm *rpm;
+ int ret;
rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
if (!rpm)
@@ -205,11 +208,23 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
rpm->rpm_channel = rpdev->ept;
dev_set_drvdata(&rpdev->dev, rpm);
- return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+ rpm->icc = platform_device_register_data(&rpdev->dev, "icc_smd_rpm", -1,
+ NULL, 0);
+ if (IS_ERR(rpm->icc))
+ return PTR_ERR(rpm->icc);
+
+ ret = of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+ if (ret)
+ platform_device_unregister(rpm->icc);
+
+ return ret;
}
static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
{
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+
+ platform_device_unregister(rpm->icc);
of_platform_depopulate(&rpdev->dev);
}
@@ -217,6 +232,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8976" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
{ .compatible = "qcom,rpm-sdm660" },
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index a39ea5061dc5..7864b75ce569 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -198,6 +198,8 @@ static const struct soc_id soc_id[] = {
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
+ { 321, "SDM845" },
+ { 341, "SDA845" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 3c5e017bacba..f93492b72c04 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -178,6 +178,13 @@ config ARCH_R8A774A1
help
This enables support for the Renesas RZ/G2M SoC.
+config ARCH_R8A774B1
+ bool "Renesas RZ/G2N SoC Platform"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774B1
+ help
+ This enables support for the Renesas RZ/G2N SoC.
+
config ARCH_R8A774C0
bool "Renesas RZ/G2E SoC Platform"
select ARCH_RCAR_GEN3
@@ -192,13 +199,24 @@ config ARCH_R8A7795
help
This enables support for the Renesas R-Car H3 SoC.
+config ARCH_R8A77960
+ bool
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77960
+
config ARCH_R8A7796
bool "Renesas R-Car M3-W SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A7796
+ select ARCH_R8A77960
help
This enables support for the Renesas R-Car M3-W SoC.
+config ARCH_R8A77961
+ bool "Renesas R-Car M3-W+ SoC Platform"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77961
+ help
+ This enables support for the Renesas R-Car M3-W+ SoC.
+
config ARCH_R8A77965
bool "Renesas R-Car M3-N SoC Platform"
select ARCH_RCAR_GEN3
@@ -253,6 +271,10 @@ config SYSC_R8A774A1
bool "RZ/G2M System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A774B1
+ bool "RZ/G2N System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A774C0
bool "RZ/G2E System Controller support" if COMPILE_TEST
select SYSC_RCAR
@@ -281,10 +303,14 @@ config SYSC_R8A7795
bool "R-Car H3 System Controller support" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7796
+config SYSC_R8A77960
bool "R-Car M3-W System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A77961
+ bool "R-Car M3-W+ System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A77965
bool "R-Car M3-N System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 00764d5a60b3..e595c3c3bd10 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SYSC_R8A7743) += r8a7743-sysc.o
obj-$(CONFIG_SYSC_R8A7745) += r8a7745-sysc.o
obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
obj-$(CONFIG_SYSC_R8A774A1) += r8a774a1-sysc.o
+obj-$(CONFIG_SYSC_R8A774B1) += r8a774b1-sysc.o
obj-$(CONFIG_SYSC_R8A774C0) += r8a774c0-sysc.o
obj-$(CONFIG_SYSC_R8A7779) += r8a7779-sysc.o
obj-$(CONFIG_SYSC_R8A7790) += r8a7790-sysc.o
@@ -14,7 +15,8 @@ obj-$(CONFIG_SYSC_R8A7791) += r8a7791-sysc.o
obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
-obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77960) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77961) += r8a7796-sysc.o
obj-$(CONFIG_SYSC_R8A77965) += r8a77965-sysc.o
obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
index edf6436e879f..4e2c0ab951b3 100644
--- a/drivers/soc/renesas/r8a7743-sysc.c
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7743-sysc.h>
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
index 65dc6b09cc85..865821a2f0c6 100644
--- a/drivers/soc/renesas/r8a7745-sysc.c
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7745-sysc.h>
diff --git a/drivers/soc/renesas/r8a77470-sysc.c b/drivers/soc/renesas/r8a77470-sysc.c
index cfa015e208ef..1eeb8018df50 100644
--- a/drivers/soc/renesas/r8a77470-sysc.c
+++ b/drivers/soc/renesas/r8a77470-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2018 Renesas Electronics Corp.
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77470-sysc.h>
diff --git a/drivers/soc/renesas/r8a774a1-sysc.c b/drivers/soc/renesas/r8a774a1-sysc.c
index 9db51ff6f5ed..38ac2c689ff0 100644
--- a/drivers/soc/renesas/r8a774a1-sysc.c
+++ b/drivers/soc/renesas/r8a774a1-sysc.c
@@ -7,7 +7,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a774a1-sysc.h>
diff --git a/drivers/soc/renesas/r8a774b1-sysc.c b/drivers/soc/renesas/r8a774b1-sysc.c
new file mode 100644
index 000000000000..5f97ff26f3f8
--- /dev/null
+++ b/drivers/soc/renesas/r8a774b1-sysc.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2N System Controller
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car M3-W System Controller
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a774b1-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a774b1_areas[] __initconst = {
+ { "always-on", 0, 0, R8A774B1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A774B1_PD_CA57_SCU, R8A774B1_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A774B1_PD_CA57_CPU0, R8A774B1_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A774B1_PD_CA57_CPU1, R8A774B1_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "a3vc", 0x380, 0, R8A774B1_PD_A3VC, R8A774B1_PD_ALWAYS_ON },
+ { "a3vp", 0x340, 0, R8A774B1_PD_A3VP, R8A774B1_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A774B1_PD_A2VC1, R8A774B1_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A774B1_PD_3DG_A, R8A774B1_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A774B1_PD_3DG_B, R8A774B1_PD_3DG_A },
+};
+
+const struct rcar_sysc_info r8a774b1_sysc_info __initconst = {
+ .areas = r8a774b1_areas,
+ .num_areas = ARRAY_SIZE(r8a774b1_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
+};
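
For mapping the positional initializers in r8a774b1_areas[] to fields: each entry is (name, PWR* channel offset, channel bit, interrupt-status bit, parent power domain, flags). A paraphrased sketch of the struct as declared in rcar-sysc.h, shown here for orientation only:

    struct rcar_sysc_area {
            const char *name;
            u16 chan_offs;          /* offset of PWRSR register for this area */
            u8 chan_bit;            /* bit in PWR* (except for PWRUP in PWRSR) */
            u8 isr_bit;             /* bit in SYSCI*R */
            int parent;             /* parent area, -1 if none */
            unsigned int flags;     /* PD_CPU_NOCR, PD_SCU, PD_ALWAYS_ON, ... */
    };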
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
index 11050e17ea81..c1c216f7d073 100644
--- a/drivers/soc/renesas/r8a774c0-sysc.c
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
@@ -6,7 +6,7 @@
* Based on Renesas R-Car E3 System Controller
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -50,4 +50,6 @@ const struct rcar_sysc_info r8a774c0_sysc_info __initconst = {
.init = r8a774c0_sysc_init,
.areas = r8a774c0_areas,
.num_areas = ARRAY_SIZE(r8a774c0_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a7779-sysc.c b/drivers/soc/renesas/r8a7779-sysc.c
index 517aa40fa6e6..e24a7151d55f 100644
--- a/drivers/soc/renesas/r8a7779-sysc.c
+++ b/drivers/soc/renesas/r8a7779-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7779-sysc.h>
diff --git a/drivers/soc/renesas/r8a7790-sysc.c b/drivers/soc/renesas/r8a7790-sysc.c
index 9b5a6bb62152..b9afe7f6245b 100644
--- a/drivers/soc/renesas/r8a7790-sysc.c
+++ b/drivers/soc/renesas/r8a7790-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7790-sysc.h>
diff --git a/drivers/soc/renesas/r8a7791-sysc.c b/drivers/soc/renesas/r8a7791-sysc.c
index acf545cdebfb..f00fa24522a3 100644
--- a/drivers/soc/renesas/r8a7791-sysc.c
+++ b/drivers/soc/renesas/r8a7791-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7791-sysc.h>
diff --git a/drivers/soc/renesas/r8a7792-sysc.c b/drivers/soc/renesas/r8a7792-sysc.c
index 05b78525cc43..60aae242c43f 100644
--- a/drivers/soc/renesas/r8a7792-sysc.c
+++ b/drivers/soc/renesas/r8a7792-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/soc/renesas/r8a7794-sysc.c b/drivers/soc/renesas/r8a7794-sysc.c
index 0d42637fa662..72ef4e85458f 100644
--- a/drivers/soc/renesas/r8a7794-sysc.c
+++ b/drivers/soc/renesas/r8a7794-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7794-sysc.h>
diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c
index cda27a67de98..91074411b8cf 100644
--- a/drivers/soc/renesas/r8a7795-sysc.c
+++ b/drivers/soc/renesas/r8a7795-sysc.c
@@ -5,7 +5,7 @@
* Copyright (C) 2016-2017 Glider bvba
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -51,25 +51,46 @@ static struct rcar_sysc_area r8a7795_areas[] __initdata = {
/*
- * Fixups for R-Car H3 revisions after ES1.x
+ * Fixups for R-Car H3 revisions
*/
-static const struct soc_device_attribute r8a7795es1[] __initconst = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
+#define HAS_A2VC0 BIT(0) /* Power domain A2VC0 is present */
+#define NO_EXTMASK BIT(1) /* Missing SYSCEXTMASK register */
+
+static const struct soc_device_attribute r8a7795_quirks_match[] __initconst = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = (void *)(HAS_A2VC0 | NO_EXTMASK),
+ }, {
+ .soc_id = "r8a7795", .revision = "ES2.*",
+ .data = (void *)(NO_EXTMASK),
+ },
{ /* sentinel */ }
};
static int __init r8a7795_sysc_init(void)
{
- if (!soc_device_match(r8a7795es1))
+ const struct soc_device_attribute *attr;
+ u32 quirks = 0;
+
+ attr = soc_device_match(r8a7795_quirks_match);
+ if (attr)
+ quirks = (uintptr_t)attr->data;
+
+ if (!(quirks & HAS_A2VC0))
rcar_sysc_nullify(r8a7795_areas, ARRAY_SIZE(r8a7795_areas),
R8A7795_PD_A2VC0);
+ if (quirks & NO_EXTMASK)
+ r8a7795_sysc_info.extmask_val = 0;
+
return 0;
}
-const struct rcar_sysc_info r8a7795_sysc_info __initconst = {
+struct rcar_sysc_info r8a7795_sysc_info __initdata = {
.init = r8a7795_sysc_init,
.areas = r8a7795_areas,
.num_areas = ARRAY_SIZE(r8a7795_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
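
The ES1/ES2 fixup above stashes per-revision quirk bits directly in the match table's .data pointer, a common idiom when soc_device_match() must distinguish more than two cases. A condensed, self-contained sketch of the same pattern (flag names hypothetical):

    #include <linux/bits.h>
    #include <linux/sys_soc.h>

    #define QUIRK_X BIT(0)          /* hypothetical quirk flags */
    #define QUIRK_Y BIT(1)

    static const struct soc_device_attribute quirks[] __initconst = {
            { .soc_id = "r8a7795", .revision = "ES1.*",
              .data = (void *)(QUIRK_X | QUIRK_Y) },
            { .soc_id = "r8a7795", .revision = "ES2.*",
              .data = (void *)QUIRK_Y },
            { /* sentinel */ }
    };

    static u32 __init soc_quirks(void)
    {
            const struct soc_device_attribute *attr = soc_device_match(quirks);

            /* .data round-trips through void *, hence the uintptr_t cast */
            return attr ? (uintptr_t)attr->data : 0;
    }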
diff --git a/drivers/soc/renesas/r8a7796-sysc.c b/drivers/soc/renesas/r8a7796-sysc.c
index 1b06f868b6e8..471bd5b3b6ad 100644
--- a/drivers/soc/renesas/r8a7796-sysc.c
+++ b/drivers/soc/renesas/r8a7796-sysc.c
@@ -1,18 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Renesas R-Car M3-W System Controller
+ * Renesas R-Car M3-W/W+ System Controller
*
* Copyright (C) 2016 Glider bvba
+ * Copyright (C) 2018-2019 Renesas Electronics Corporation
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7796-sysc.h>
#include "rcar-sysc.h"
-static const struct rcar_sysc_area r8a7796_areas[] __initconst = {
+static struct rcar_sysc_area r8a7796_areas[] __initdata = {
{ "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON,
PD_SCU },
@@ -39,7 +40,28 @@ static const struct rcar_sysc_area r8a7796_areas[] __initconst = {
{ "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON },
};
-const struct rcar_sysc_info r8a7796_sysc_info __initconst = {
+
+#ifdef CONFIG_SYSC_R8A77960
+const struct rcar_sysc_info r8a77960_sysc_info __initconst = {
+ .areas = r8a7796_areas,
+ .num_areas = ARRAY_SIZE(r8a7796_areas),
+};
+#endif /* CONFIG_SYSC_R8A77960 */
+
+#ifdef CONFIG_SYSC_R8A77961
+static int __init r8a77961_sysc_init(void)
+{
+ rcar_sysc_nullify(r8a7796_areas, ARRAY_SIZE(r8a7796_areas),
+ R8A7796_PD_A2VC0);
+
+ return 0;
+}
+
+const struct rcar_sysc_info r8a77961_sysc_info __initconst = {
+ .init = r8a77961_sysc_init,
.areas = r8a7796_areas,
.num_areas = ARRAY_SIZE(r8a7796_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
+#endif /* CONFIG_SYSC_R8A77961 */
diff --git a/drivers/soc/renesas/r8a77965-sysc.c b/drivers/soc/renesas/r8a77965-sysc.c
index e0533beb50fd..ff0b0d116992 100644
--- a/drivers/soc/renesas/r8a77965-sysc.c
+++ b/drivers/soc/renesas/r8a77965-sysc.c
@@ -7,7 +7,7 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77965-sysc.h>
@@ -33,4 +33,6 @@ static const struct rcar_sysc_area r8a77965_areas[] __initconst = {
const struct rcar_sysc_info r8a77965_sysc_info __initconst = {
.areas = r8a77965_areas,
.num_areas = ARRAY_SIZE(r8a77965_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
index 280c48b80f24..706258250600 100644
--- a/drivers/soc/renesas/r8a77970-sysc.c
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -5,7 +5,7 @@
* Copyright (C) 2017 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77970-sysc.h>
@@ -32,4 +32,6 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
.areas = r8a77970_areas,
.num_areas = ARRAY_SIZE(r8a77970_areas),
+ .extmask_offs = 0x1b0,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
index a8dbe55e8ba8..39ca84a67daa 100644
--- a/drivers/soc/renesas/r8a77980-sysc.c
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -6,7 +6,7 @@
* Copyright (C) 2018 Cogent Embedded, Inc.
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77980-sysc.h>
@@ -49,4 +49,6 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
.areas = r8a77980_areas,
.num_areas = ARRAY_SIZE(r8a77980_areas),
+ .extmask_offs = 0x138,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77990-sysc.c b/drivers/soc/renesas/r8a77990-sysc.c
index 664b244eb1dd..9f92737dc352 100644
--- a/drivers/soc/renesas/r8a77990-sysc.c
+++ b/drivers/soc/renesas/r8a77990-sysc.c
@@ -5,7 +5,7 @@
* Copyright (C) 2018 Renesas Electronics Corp.
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -50,4 +50,6 @@ const struct rcar_sysc_info r8a77990_sysc_info __initconst = {
.init = r8a77990_sysc_init,
.areas = r8a77990_areas,
.num_areas = ARRAY_SIZE(r8a77990_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77995-sysc.c b/drivers/soc/renesas/r8a77995-sysc.c
index 6243aaaf60fb..efcc67e3d76d 100644
--- a/drivers/soc/renesas/r8a77995-sysc.c
+++ b/drivers/soc/renesas/r8a77995-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2017 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77995-sysc.h>
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index d183c381e8db..14d05a070dd3 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -45,6 +45,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a77470-rst", .data = &rcar_rst_gen2 },
/* RZ/G2 is handled like R-Car Gen3 */
{ .compatible = "renesas,r8a774a1-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774b1-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a774c0-rst", .data = &rcar_rst_gen3 },
/* R-Car Gen1 */
{ .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
@@ -58,6 +59,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
/* R-Car Gen3 */
{ .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77961-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77965-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 59b5e6b10272..f0b291e02b8a 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -63,6 +63,7 @@ struct rcar_sysc_ch {
static void __iomem *rcar_sysc_base;
static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
+static u32 rcar_sysc_extmask_offs, rcar_sysc_extmask_val;
static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
{
@@ -106,6 +107,14 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
spin_lock_irqsave(&rcar_sysc_lock, flags);
/*
+ * Mask external power requests for CPU or 3DG domains
+ */
+ if (rcar_sysc_extmask_val) {
+ iowrite32(rcar_sysc_extmask_val,
+ rcar_sysc_base + rcar_sysc_extmask_offs);
+ }
+
+ /*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
@@ -148,6 +157,9 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
out:
+ if (rcar_sysc_extmask_val)
+ iowrite32(0, rcar_sysc_base + rcar_sysc_extmask_offs);
+
spin_unlock_irqrestore(&rcar_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
@@ -275,6 +287,9 @@ static const struct of_device_id rcar_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A774A1
{ .compatible = "renesas,r8a774a1-sysc", .data = &r8a774a1_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A774B1
+ { .compatible = "renesas,r8a774b1-sysc", .data = &r8a774b1_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A774C0
{ .compatible = "renesas,r8a774c0-sysc", .data = &r8a774c0_sysc_info },
#endif
@@ -298,8 +313,11 @@ static const struct of_device_id rcar_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A7795
{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
#endif
-#ifdef CONFIG_SYSC_R8A7796
- { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
+#ifdef CONFIG_SYSC_R8A77960
+ { .compatible = "renesas,r8a7796-sysc", .data = &r8a77960_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77961
+ { .compatible = "renesas,r8a77961-sysc", .data = &r8a77961_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77965
{ .compatible = "renesas,r8a77965-sysc", .data = &r8a77965_sysc_info },
@@ -360,6 +378,10 @@ static int __init rcar_sysc_pd_init(void)
rcar_sysc_base = base;
+ /* Optional External Request Mask Register */
+ rcar_sysc_extmask_offs = info->extmask_offs;
+ rcar_sysc_extmask_val = info->extmask_val;
+
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
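
Taken together, the rcar-sysc.c hunks bracket every power-domain transition with the optional SYSCEXTMASK register: external power requests are masked before the PWRON/PWROFF sequence and unmasked on the way out, including the error path. A condensed sketch of the resulting control flow, where do_power_sequence() is a hypothetical stand-in for the existing body of rcar_sysc_power():

    static int power_sketch(const struct rcar_sysc_ch *ch, bool on)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&rcar_sysc_lock, flags);

            if (rcar_sysc_extmask_val)      /* zero when the SoC lacks it */
                    iowrite32(rcar_sysc_extmask_val,
                              rcar_sysc_base + rcar_sysc_extmask_offs);

            ret = do_power_sequence(ch, on);

            if (rcar_sysc_extmask_val)      /* undone even when ret != 0 */
                    iowrite32(0, rcar_sysc_base + rcar_sysc_extmask_offs);

            spin_unlock_irqrestore(&rcar_sysc_lock, flags);
            return ret;
    }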
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 485520a5b295..8d074489fba9 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -44,20 +44,25 @@ struct rcar_sysc_info {
int (*init)(void); /* Optional */
const struct rcar_sysc_area *areas;
unsigned int num_areas;
+ /* Optional External Request Mask Register */
+ u32 extmask_offs; /* SYSCEXTMASK register offset */
+ u32 extmask_val; /* SYSCEXTMASK register mask value */
};
extern const struct rcar_sysc_info r8a7743_sysc_info;
extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a77470_sysc_info;
extern const struct rcar_sysc_info r8a774a1_sysc_info;
+extern const struct rcar_sysc_info r8a774b1_sysc_info;
extern const struct rcar_sysc_info r8a774c0_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
-extern const struct rcar_sysc_info r8a7795_sysc_info;
-extern const struct rcar_sysc_info r8a7796_sysc_info;
+extern struct rcar_sysc_info r8a7795_sysc_info;
+extern const struct rcar_sysc_info r8a77960_sysc_info;
+extern const struct rcar_sysc_info r8a77961_sysc_info;
extern const struct rcar_sysc_info r8a77965_sysc_info;
extern const struct rcar_sysc_info r8a77970_sysc_info;
extern const struct rcar_sysc_info r8a77980_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 3299cf5365f3..850f5733dc88 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -116,6 +116,11 @@ static const struct renesas_soc soc_rz_g2m __initconst __maybe_unused = {
.id = 0x52,
};
+static const struct renesas_soc soc_rz_g2n __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x55,
+};
+
static const struct renesas_soc soc_rz_g2e __initconst __maybe_unused = {
.family = &fam_rzg2,
.id = 0x57,
@@ -227,6 +232,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A774A1
{ .compatible = "renesas,r8a774a1", .data = &soc_rz_g2m },
#endif
+#ifdef CONFIG_ARCH_R8A774B1
+ { .compatible = "renesas,r8a774b1", .data = &soc_rz_g2n },
+#endif
#ifdef CONFIG_ARCH_R8A774C0
{ .compatible = "renesas,r8a774c0", .data = &soc_rz_g2e },
#endif
@@ -254,9 +262,12 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7795
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
-#ifdef CONFIG_ARCH_R8A7796
+#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
+#ifdef CONFIG_ARCH_R8A77961
+ { .compatible = "renesas,r8a77961", .data = &soc_rcar_m3_w },
+#endif
#ifdef CONFIG_ARCH_R8A77965
{ .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
#endif
@@ -326,7 +337,7 @@ static int __init renesas_soc_init(void)
if (np) {
chipid = of_iomap(np, 0);
of_node_put(np);
- } else if (soc->id) {
+ } else if (soc->id && family->reg) {
chipid = ioremap(family->reg, 4);
}
if (chipid) {
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 33ad0de2de3c..27fc59bbb520 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -7,6 +7,16 @@ menuconfig SOC_SAMSUNG
if SOC_SAMSUNG
+config EXYNOS_ASV
+ bool "Exynos Adaptive Supply Voltage support" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && EXYNOS_CHIPID) || COMPILE_TEST
+ select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
+
+# There is no need to enable these drivers for ARMv8
+config EXYNOS_ASV_ARM
+ bool "Exynos ASV ARMv7-specific driver extensions" if COMPILE_TEST
+ depends on EXYNOS_ASV
+
config EXYNOS_CHIPID
bool "Exynos Chipid controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 3b6a8797416c..edd1d6ea064d 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_EXYNOS_ASV) += exynos-asv.o
+obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
+
obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
new file mode 100644
index 000000000000..30bb7b7cc769
--- /dev/null
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage support
+ */
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+
+#include "exynos-asv.h"
+#include "exynos5422-asv.h"
+
+#define MHZ 1000000U
+
+static int exynos_asv_update_cpu_opps(struct exynos_asv *asv,
+ struct device *cpu)
+{
+ struct exynos_asv_subsys *subsys = NULL;
+ struct dev_pm_opp *opp;
+ unsigned int opp_freq;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(asv->subsys); i++) {
+ if (of_device_is_compatible(cpu->of_node,
+ asv->subsys[i].cpu_dt_compat)) {
+ subsys = &asv->subsys[i];
+ break;
+ }
+ }
+ if (!subsys)
+ return -EINVAL;
+
+ for (i = 0; i < subsys->table.num_rows; i++) {
+ unsigned int new_volt, volt;
+ int ret;
+
+ opp_freq = exynos_asv_opp_get_frequency(subsys, i);
+
+ opp = dev_pm_opp_find_freq_exact(cpu, opp_freq * MHZ, true);
+ if (IS_ERR(opp)) {
+ dev_info(asv->dev, "cpu%d opp%d, freq: %u missing\n",
+ cpu->id, i, opp_freq);
+
+ continue;
+ }
+
+ volt = dev_pm_opp_get_voltage(opp);
+ new_volt = asv->opp_get_voltage(subsys, i, volt);
+ dev_pm_opp_put(opp);
+
+ if (new_volt == volt)
+ continue;
+
+ ret = dev_pm_opp_adjust_voltage(cpu, opp_freq * MHZ,
+ new_volt, new_volt, new_volt);
+ if (ret < 0)
+ dev_err(asv->dev,
+ "Failed to adjust OPP %u Hz/%u uV for cpu%d\n",
+ opp_freq, new_volt, cpu->id);
+ else
+ dev_dbg(asv->dev,
+ "Adjusted OPP %u Hz/%u -> %u uV, cpu%d\n",
+ opp_freq, volt, new_volt, cpu->id);
+ }
+
+ return 0;
+}
+
+static int exynos_asv_update_opps(struct exynos_asv *asv)
+{
+ struct opp_table *last_opp_table = NULL;
+ struct device *cpu;
+ int ret, cpuid;
+
+ for_each_possible_cpu(cpuid) {
+ struct opp_table *opp_table;
+
+ cpu = get_cpu_device(cpuid);
+ if (!cpu)
+ continue;
+
+ opp_table = dev_pm_opp_get_opp_table(cpu);
+ if (IS_ERR_OR_NULL(opp_table))
+ continue;
+
+ if (!last_opp_table || opp_table != last_opp_table) {
+ last_opp_table = opp_table;
+
+ ret = exynos_asv_update_cpu_opps(asv, cpu);
+ if (ret < 0)
+ dev_err(asv->dev, "Couldn't udate OPPs for cpu%d\n",
+ cpuid);
+ }
+
+ dev_pm_opp_put_opp_table(opp_table);
+ }
+
+ return 0;
+}
+
+static int exynos_asv_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct exynos_asv *asv);
+ struct exynos_asv *asv;
+ struct device *cpu_dev;
+ u32 product_id = 0;
+ int ret, i;
+
+ cpu_dev = get_cpu_device(0);
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret < 0)
+ return -EPROBE_DEFER;
+
+ asv = devm_kzalloc(&pdev->dev, sizeof(*asv), GFP_KERNEL);
+ if (!asv)
+ return -ENOMEM;
+
+ asv->chipid_regmap = device_node_to_regmap(pdev->dev.of_node);
+ if (IS_ERR(asv->chipid_regmap)) {
+ dev_err(&pdev->dev, "Could not find syscon regmap\n");
+ return PTR_ERR(asv->chipid_regmap);
+ }
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
+
+ switch (product_id & EXYNOS_MASK) {
+ case 0xE5422000:
+ probe_func = exynos5422_asv_init;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "samsung,asv-bin",
+ &asv->of_bin);
+ if (ret < 0)
+ asv->of_bin = -EINVAL;
+
+ asv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, asv);
+
+ for (i = 0; i < ARRAY_SIZE(asv->subsys); i++)
+ asv->subsys[i].asv = asv;
+
+ ret = probe_func(asv);
+ if (ret < 0)
+ return ret;
+
+ return exynos_asv_update_opps(asv);
+}
+
+static const struct of_device_id exynos_asv_of_device_ids[] = {
+ { .compatible = "samsung,exynos4210-chipid" },
+ {}
+};
+
+static struct platform_driver exynos_asv_driver = {
+ .driver = {
+ .name = "exynos-asv",
+ .of_match_table = exynos_asv_of_device_ids,
+ },
+ .probe = exynos_asv_probe,
+};
+module_platform_driver(exynos_asv_driver);
diff --git a/drivers/soc/samsung/exynos-asv.h b/drivers/soc/samsung/exynos-asv.h
new file mode 100644
index 000000000000..3fd1f2acd999
--- /dev/null
+++ b/drivers/soc/samsung/exynos-asv.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage support
+ */
+#ifndef __LINUX_SOC_EXYNOS_ASV_H
+#define __LINUX_SOC_EXYNOS_ASV_H
+
+struct regmap;
+
+/* HPM, IDS values to select target group */
+struct asv_limit_entry {
+ unsigned int hpm;
+ unsigned int ids;
+};
+
+struct exynos_asv_table {
+ unsigned int num_rows;
+ unsigned int num_cols;
+ u32 *buf;
+};
+
+struct exynos_asv_subsys {
+ struct exynos_asv *asv;
+ const char *cpu_dt_compat;
+ int id;
+ struct exynos_asv_table table;
+
+ unsigned int base_volt;
+ unsigned int offset_volt_h;
+ unsigned int offset_volt_l;
+};
+
+struct exynos_asv {
+ struct device *dev;
+ struct regmap *chipid_regmap;
+ struct exynos_asv_subsys subsys[2];
+
+ int (*opp_get_voltage)(const struct exynos_asv_subsys *subs,
+ int level, unsigned int voltage);
+ unsigned int group;
+ unsigned int table;
+
+ /* True if SG fields from PKG_ID register should be used */
+ bool use_sg;
+ /* ASV bin read from DT */
+ int of_bin;
+};
+
+static inline u32 __asv_get_table_entry(const struct exynos_asv_table *table,
+ unsigned int row, unsigned int col)
+{
+ return table->buf[row * (table->num_cols) + col];
+}
+
+static inline u32 exynos_asv_opp_get_voltage(const struct exynos_asv_subsys *subsys,
+ unsigned int level, unsigned int group)
+{
+ return __asv_get_table_entry(&subsys->table, level, group + 1);
+}
+
+static inline u32 exynos_asv_opp_get_frequency(const struct exynos_asv_subsys *subsys,
+ unsigned int level)
+{
+ return __asv_get_table_entry(&subsys->table, level, 0);
+}
+
+#endif /* __LINUX_SOC_EXYNOS_ASV_H */
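
The two inline helpers above address a flattened row-major table: column 0 of each row holds the frequency and columns 1..num_cols-1 hold the per-ASV-group voltages, so the group index is shifted by one. A short worked example under those definitions (values hypothetical):

    /*
     * With num_cols = 15 (frequency + 14 groups), row 1 of buf starts
     * at index 1 * 15:
     *
     *   exynos_asv_opp_get_frequency(subsys, 1)  == buf[1 * 15 + 0]
     *   exynos_asv_opp_get_voltage(subsys, 1, 5) == buf[1 * 15 + (5 + 1)]
     *
     * i.e. level selects the row and (group + 1) selects the column.
     */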
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index c55a47cfe617..b89c26a71c6e 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -45,17 +45,25 @@ static const char * __init product_id_to_soc_id(unsigned int product_id)
return NULL;
}
-int __init exynos_chipid_early_init(void)
+static int __init exynos_chipid_early_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device_node *root;
+ struct device_node *syscon;
struct regmap *regmap;
u32 product_id;
u32 revision;
int ret;
- regmap = syscon_regmap_lookup_by_compatible("samsung,exynos4210-chipid");
+ syscon = of_find_compatible_node(NULL, NULL,
+ "samsung,exynos4210-chipid");
+ if (!syscon)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(syscon);
+ of_node_put(syscon);
+
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
new file mode 100644
index 000000000000..01bb3050d678
--- /dev/null
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Exynos 5422 SoC Adaptive Supply Voltage support
+ */
+
+#include <linux/bitrev.h>
+#include <linux/errno.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+#include <linux/slab.h>
+
+#include "exynos-asv.h"
+#include "exynos5422-asv.h"
+
+#define ASV_GROUPS_NUM 14
+#define ASV_ARM_DVFS_NUM 20
+#define ASV_ARM_BIN2_DVFS_NUM 17
+#define ASV_KFC_DVFS_NUM 14
+#define ASV_KFC_BIN2_DVFS_NUM 12
+
+/*
+ * This array is a set of 4 ASV data tables. The first column of each table
+ * contains the frequency value in MHz; the subsequent columns contain the
+ * CPU cluster's supply voltage values in uV.
+ * To create a set of OPPs for a specific SoC revision, one of the voltage
+ * columns (1...14) from one of the tables (0...3) is selected during
+ * initialization. There are separate ASV tables for the big (ARM) and
+ * LITTLE (KFC) CPU clusters. Only OPPs that are already defined in the
+ * devicetree will be updated.
+ */
+
+static const u32 asv_arm_table[][ASV_ARM_DVFS_NUM][ASV_GROUPS_NUM + 1] = {
+{
+ /* ARM 0, 1 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1300000, 1287500, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000,
+ 1162500, 1150000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1800, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1700, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1075000, 1062500, 1050000, 1037500, 1025000 },
+ { 1600, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1037500, 1025000, 1012500, 1000000, 987500 },
+ { 1500, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 1000000, 987500, 975000, 962500, 950000 },
+ { 1400, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 975000, 962500, 950000, 937500, 925000 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM 2 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1312500, 1300000, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1262500, 1250000, 1250000, 1237500, 1212500, 1200000, 1187500,
+ 1175000, 1162500, 1175000, 1162500, 1150000, 1137500, 1125000 },
+ { 1800, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500,
+ 1125000, 1112500, 1125000, 1112500, 1100000, 1087500, 1075000 },
+ { 1700, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1600, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1400, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000,
+ 987500, 975000, 987500, 975000, 962500, 950000, 937500 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM 3 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1300000, 1287500, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500,
+ 1175000, 1162500, 1175000, 1162500, 1150000, 1137500, 1125000 },
+ { 1800, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500,
+ 1125000, 1112500, 1125000, 1112500, 1100000, 1087500, 1075000 },
+ { 1700, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1600, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1400, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000,
+ 987500, 975000, 987500, 975000, 962500, 950000, 937500 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM bin 2 */
+ { 1800, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500,
+ 1150000, 1137500, 1150000, 1137500, 1125000, 1112500, 1100000 },
+ { 1700, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1600, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1075000, 1062500, 1050000, 1037500, 1025000 },
+ { 1500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1037500, 1025000, 1012500, 1000000, 987500 },
+ { 1400, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1300, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 1000000, 987500, 975000, 962500, 950000 },
+ { 1200, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 975000, 962500, 950000, 937500, 925000 },
+ { 1100, 1037500, 1025000, 1012500, 1000000, 987500, 975000, 962500,
+ 950000, 937500, 950000, 937500, 925000, 912500, 900000 },
+ { 1000, 1012500, 1000000, 987500, 975000, 962500, 950000, 937500,
+ 925000, 912500, 925000, 912500, 900000, 900000, 900000 },
+ { 900, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 962500, 950000, 937500, 925000, 912500, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 937500, 925000, 912500, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}
+};
+
+static const u32 asv_kfc_table[][ASV_KFC_DVFS_NUM][ASV_GROUPS_NUM + 1] = {
+{
+ /* KFC 0, 1 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC 2 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC 3 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC bin 2 */
+ { 1300, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000,
+ 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500 },
+ { 1200, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1100, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1000, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 900, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 975000, 962500, 950000, 937500, 925000 },
+ { 800, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 937500, 925000, 912500, 900000, 900000 },
+ { 700, 1012500, 1000000, 987500, 975000, 962500, 950000, 937500,
+ 925000, 912500, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 937500, 925000, 912500, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}
+};
+
+static const struct asv_limit_entry __asv_limits[ASV_GROUPS_NUM] = {
+ { 13, 55 },
+ { 21, 65 },
+ { 25, 69 },
+ { 30, 72 },
+ { 36, 74 },
+ { 43, 76 },
+ { 51, 78 },
+ { 65, 80 },
+ { 81, 82 },
+ { 98, 84 },
+ { 119, 87 },
+ { 135, 89 },
+ { 150, 92 },
+ { 999, 999 },
+};
+
+static int exynos5422_asv_get_group(struct exynos_asv *asv)
+{
+ unsigned int pkgid_reg, auxi_reg;
+ int hpm, ids, i;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PKG_ID, &pkgid_reg);
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_AUX_INFO, &auxi_reg);
+
+ if (asv->use_sg) {
+ u32 sga = (pkgid_reg >> EXYNOS5422_SG_A_OFFSET) &
+ EXYNOS5422_SG_A_MASK;
+
+ u32 sgb = (pkgid_reg >> EXYNOS5422_SG_B_OFFSET) &
+ EXYNOS5422_SG_B_MASK;
+
+ if ((pkgid_reg >> EXYNOS5422_SG_BSIGN_OFFSET) &
+ EXYNOS5422_SG_BSIGN_MASK)
+ return sga + sgb;
+ else
+ return sga - sgb;
+ }
+
+ hpm = (auxi_reg >> EXYNOS5422_TMCB_OFFSET) & EXYNOS5422_TMCB_MASK;
+ ids = (pkgid_reg >> EXYNOS5422_IDS_OFFSET) & EXYNOS5422_IDS_MASK;
+
+ for (i = 0; i < ASV_GROUPS_NUM; i++) {
+ if (ids <= __asv_limits[i].ids)
+ break;
+ if (hpm <= __asv_limits[i].hpm)
+ break;
+ }
+ if (i < ASV_GROUPS_NUM)
+ return i;
+
+ return 0;
+}
+
+static int __asv_offset_voltage(unsigned int index)
+{
+ switch (index) {
+ case 1:
+ return 12500;
+ case 2:
+ return 50000;
+ case 3:
+ return 25000;
+ default:
+ return 0;
+ }
+}
+
+static void exynos5422_asv_offset_voltage_setup(struct exynos_asv *asv)
+{
+ struct exynos_asv_subsys *subsys;
+ unsigned int reg, value;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_AUX_INFO, &reg);
+
+ /* ARM offset voltage setup */
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_ARM];
+
+ subsys->base_volt = 1000000;
+
+ value = (reg >> EXYNOS5422_ARM_UP_OFFSET) & EXYNOS5422_ARM_UP_MASK;
+ subsys->offset_volt_h = __asv_offset_voltage(value);
+
+ value = (reg >> EXYNOS5422_ARM_DN_OFFSET) & EXYNOS5422_ARM_DN_MASK;
+ subsys->offset_volt_l = __asv_offset_voltage(value);
+
+ /* KFC offset voltage setup */
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_KFC];
+
+ subsys->base_volt = 1000000;
+
+ value = (reg >> EXYNOS5422_KFC_UP_OFFSET) & EXYNOS5422_KFC_UP_MASK;
+ subsys->offset_volt_h = __asv_offset_voltage(value);
+
+ value = (reg >> EXYNOS5422_KFC_DN_OFFSET) & EXYNOS5422_KFC_DN_MASK;
+ subsys->offset_volt_l = __asv_offset_voltage(value);
+}
+
+static int exynos5422_asv_opp_get_voltage(const struct exynos_asv_subsys *subsys,
+ int level, unsigned int volt)
+{
+ unsigned int asv_volt;
+
+ if (level >= subsys->table.num_rows)
+ return volt;
+
+ asv_volt = exynos_asv_opp_get_voltage(subsys, level,
+ subsys->asv->group);
+
+ if (volt > subsys->base_volt)
+ asv_volt += subsys->offset_volt_h;
+ else
+ asv_volt += subsys->offset_volt_l;
+
+ return asv_volt;
+}
+
+static unsigned int exynos5422_asv_parse_table(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_TABLE_OFFSET) & EXYNOS5422_TABLE_MASK;
+}
+
+static bool exynos5422_asv_parse_bin2(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_BIN2_OFFSET) & EXYNOS5422_BIN2_MASK;
+}
+
+static bool exynos5422_asv_parse_sg(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_USESG_OFFSET) & EXYNOS5422_USESG_MASK;
+}
+
+int exynos5422_asv_init(struct exynos_asv *asv)
+{
+ struct exynos_asv_subsys *subsys;
+ unsigned int table_index;
+ unsigned int pkg_id;
+ bool bin2;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PKG_ID, &pkg_id);
+
+ if (asv->of_bin == 2) {
+ bin2 = true;
+ asv->use_sg = false;
+ } else {
+ asv->use_sg = exynos5422_asv_parse_sg(pkg_id);
+ bin2 = exynos5422_asv_parse_bin2(pkg_id);
+ }
+
+ asv->group = exynos5422_asv_get_group(asv);
+ asv->table = exynos5422_asv_parse_table(pkg_id);
+
+ exynos5422_asv_offset_voltage_setup(asv);
+
+ if (bin2) {
+ table_index = 3;
+ } else {
+ if (asv->table == 2 || asv->table == 3)
+ table_index = asv->table - 1;
+ else
+ table_index = 0;
+ }
+
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_ARM];
+ subsys->cpu_dt_compat = "arm,cortex-a15";
+ if (bin2)
+ subsys->table.num_rows = ASV_ARM_BIN2_DVFS_NUM;
+ else
+ subsys->table.num_rows = ASV_ARM_DVFS_NUM;
+ subsys->table.num_cols = ASV_GROUPS_NUM + 1;
+ subsys->table.buf = (u32 *)asv_arm_table[table_index];
+
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_KFC];
+ subsys->cpu_dt_compat = "arm,cortex-a7";
+ if (bin2)
+ subsys->table.num_rows = ASV_KFC_BIN2_DVFS_NUM;
+ else
+ subsys->table.num_rows = ASV_KFC_DVFS_NUM;
+ subsys->table.num_cols = ASV_GROUPS_NUM + 1;
+ subsys->table.buf = (u32 *)asv_kfc_table[table_index];
+
+ asv->opp_get_voltage = exynos5422_asv_opp_get_voltage;
+
+ return 0;
+}
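
To make the binning in exynos5422_asv_get_group() concrete: the loop stops at the first __asv_limits[] row whose IDS or HPM bound covers the fused value, and that row index becomes the ASV group. A worked example with hypothetical fused values:

    /*
     * Hypothetical fused values: ids = 70, hpm = 40.
     * Rows 0..2 are passed over (70 > 55/65/69 and 40 > 13/21/25);
     * row 3 matches via ids 70 <= 72, so the chip lands in group 3.
     * exynos5422_asv_opp_get_voltage() then reads column 3 + 1 of the
     * selected table (column 0 being the frequency).
     */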
diff --git a/drivers/soc/samsung/exynos5422-asv.h b/drivers/soc/samsung/exynos5422-asv.h
new file mode 100644
index 000000000000..95a5fb1a7508
--- /dev/null
+++ b/drivers/soc/samsung/exynos5422-asv.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Exynos 5422 SoC Adaptive Supply Voltage support
+ */
+
+#ifndef __LINUX_SOC_EXYNOS5422_ASV_H
+#define __LINUX_SOC_EXYNOS5422_ASV_H
+
+#include <linux/errno.h>
+
+enum {
+ EXYNOS_ASV_SUBSYS_ID_ARM,
+ EXYNOS_ASV_SUBSYS_ID_KFC,
+ EXYNOS_ASV_SUBSYS_ID_MAX
+};
+
+struct exynos_asv;
+
+#ifdef CONFIG_EXYNOS_ASV_ARM
+int exynos5422_asv_init(struct exynos_asv *asv);
+#else
+static inline int exynos5422_asv_init(struct exynos_asv *asv)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* __LINUX_SOC_EXYNOS5422_ASV_H */
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index c8ef05d6b8c7..84bd615c4a92 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -15,6 +15,7 @@ config ARCH_TEGRA_2x_SOC
select PL310_ERRATA_769419 if CACHE_L2X0
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
+ select SOC_TEGRA20_VOLTAGE_COUPLER
select TEGRA_TIMER
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -28,6 +29,7 @@ config ARCH_TEGRA_3x_SOC
select PL310_ERRATA_769419 if CACHE_L2X0
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
+ select SOC_TEGRA30_VOLTAGE_COUPLER
select TEGRA_TIMER
help
Support for NVIDIA Tegra T30 processor family, based on the
@@ -135,3 +137,11 @@ config SOC_TEGRA_POWERGATE_BPMP
def_bool y
depends on PM_GENERIC_DOMAINS
depends on TEGRA_BPMP
+
+config SOC_TEGRA20_VOLTAGE_COUPLER
+ bool "Voltage scaling support for Tegra20 SoCs"
+ depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+
+config SOC_TEGRA30_VOLTAGE_COUPLER
+ bool "Voltage scaling support for Tegra30 SoCs"
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 902759fe5f4d..9c809c1814bd 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -5,3 +5,5 @@ obj-y += common.o
obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
obj-$(CONFIG_SOC_TEGRA_POWERGATE_BPMP) += powergate-bpmp.o
+obj-$(CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER) += regulators-tegra20.o
+obj-$(CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER) += regulators-tegra30.o
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
index b6bdeef33db1..eb96a3086d6d 100644
--- a/drivers/soc/tegra/flowctrl.c
+++ b/drivers/soc/tegra/flowctrl.c
@@ -91,8 +91,23 @@ void flowctrl_cpu_suspend_enter(unsigned int cpuid)
reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
/* clear wfi bitmap */
reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
- /* pwr gating on wfi */
- reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+
+ if (tegra_get_chip_id() == TEGRA30) {
+ /*
+ * The wfi doesn't work well on Tegra30 because
+ * the CPU hangs under some odd circumstances after
+ * power-gating (like memory running off PLLP),
+ * hence use wfe, which works perfectly fine.
+ * Note that the Tegra30 TRM clearly states that
+ * wfi should be used for "Cluster Switching",
+ * while wfe is for power-gating, just like it
+ * is done on Tegra20.
+ */
+ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
+ } else {
+ /* pwr gating on wfi */
+ reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+ }
break;
}
reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 3eb44e65b326..4d719d4b8d5a 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -8,6 +8,8 @@
#include <linux/kobject.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -31,50 +33,6 @@ static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
[TEGRA_REVISION_A04] = "A04",
};
-static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset)
-{
- u32 val;
-
- val = fuse->read(fuse, round_down(offset, 4));
- val >>= (offset % 4) * 8;
- val &= 0xff;
-
- return val;
-}
-
-static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t pos, size_t size)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct tegra_fuse *fuse = dev_get_drvdata(dev);
- int i;
-
- if (pos < 0 || pos >= attr->size)
- return 0;
-
- if (size > attr->size - pos)
- size = attr->size - pos;
-
- for (i = 0; i < size; i++)
- buf[i] = fuse_readb(fuse, pos + i);
-
- return i;
-}
-
-static struct bin_attribute fuse_bin_attr = {
- .attr = { .name = "fuse", .mode = S_IRUGO, },
- .read = fuse_read,
-};
-
-static int tegra_fuse_create_sysfs(struct device *dev, unsigned int size,
- const struct tegra_fuse_info *info)
-{
- fuse_bin_attr.size = size;
-
- return device_create_bin_file(dev, &fuse_bin_attr);
-}
-
static const struct of_device_id car_match[] __initconst = {
{ .compatible = "nvidia,tegra20-car", },
{ .compatible = "nvidia,tegra30-car", },
@@ -115,9 +73,111 @@ static const struct of_device_id tegra_fuse_match[] = {
{ /* sentinel */ }
};
+static int tegra_fuse_read(void *priv, unsigned int offset, void *value,
+ size_t bytes)
+{
+ unsigned int count = bytes / 4, i;
+ struct tegra_fuse *fuse = priv;
+ u32 *buffer = value;
+
+ for (i = 0; i < count; i++)
+ buffer[i] = fuse->read(fuse, offset + i * 4);
+
+ return 0;
+}
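+
+/*
+ * For illustration: reads are word-granular, matching the nvmem
+ * word_size and stride of 4 configured in tegra_fuse_probe() below.
+ * A 4-byte read at offset 0x98, for instance, performs a single
+ * fuse->read(fuse, 0x98) call and yields the "tsensor-cpu0" cell.
+ */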
+
+static const struct nvmem_cell_info tegra_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "sata-calibration",
+ .offset = 0x124,
+ .bytes = 1,
+ .bit_offset = 0,
+ .nbits = 2,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x180,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-realignment",
+ .offset = 0x1fc,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "gpu-calibration",
+ .offset = 0x204,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration-ext",
+ .offset = 0x250,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
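+
+/*
+ * A minimal consumer-side sketch, shown for illustration only (the
+ * device/cell pairing is hypothetical): a driver wired up through one
+ * of the nvmem_cell_lookup tables added in fuse-tegra30.c would read
+ * a cell roughly like this:
+ *
+ *	struct nvmem_cell *cell;
+ *	size_t len;
+ *	u32 *value;
+ *
+ *	cell = nvmem_cell_get(dev, "calibration");
+ *	if (IS_ERR(cell))
+ *		return PTR_ERR(cell);
+ *
+ *	value = nvmem_cell_read(cell, &len);
+ *	nvmem_cell_put(cell);
+ *	if (IS_ERR(value))
+ *		return PTR_ERR(value);
+ *
+ *	... use *value, then kfree(value) ...
+ */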
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
+ struct nvmem_config nvmem;
struct resource *res;
int err;
@@ -146,20 +206,42 @@ static int tegra_fuse_probe(struct platform_device *pdev)
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
- if (err < 0) {
- fuse->base = base;
- return err;
- }
+ if (err < 0)
+ goto restore;
}
- if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
- fuse->soc->info))
- return -ENODEV;
+ memset(&nvmem, 0, sizeof(nvmem));
+ nvmem.dev = &pdev->dev;
+ nvmem.name = "fuse";
+ nvmem.id = -1;
+ nvmem.owner = THIS_MODULE;
+ nvmem.cells = tegra_fuse_cells;
+ nvmem.ncells = ARRAY_SIZE(tegra_fuse_cells);
+ nvmem.type = NVMEM_TYPE_OTP;
+ nvmem.read_only = true;
+ nvmem.root_only = true;
+ nvmem.reg_read = tegra_fuse_read;
+ nvmem.size = fuse->soc->info->size;
+ nvmem.word_size = 4;
+ nvmem.stride = 4;
+ nvmem.priv = fuse;
+
+ fuse->nvmem = devm_nvmem_register(&pdev->dev, &nvmem);
+ if (IS_ERR(fuse->nvmem)) {
+ err = PTR_ERR(fuse->nvmem);
+ dev_err(&pdev->dev, "failed to register NVMEM device: %d\n",
+ err);
+ goto restore;
+ }
/* release the early I/O memory mapping */
iounmap(base);
return 0;
+
+restore:
+ fuse->base = base;
+ return err;
}
static struct platform_driver tegra_fuse_driver = {
@@ -186,9 +268,12 @@ u32 __init tegra_fuse_read_early(unsigned int offset)
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse->read)
+ if (!fuse->read || !fuse->clk)
return -EPROBE_DEFER;
+ if (IS_ERR(fuse->clk))
+ return PTR_ERR(fuse->clk);
+
*value = fuse->read(fuse, offset);
return 0;
@@ -338,6 +423,15 @@ static int __init tegra_init_fuse(void)
pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ if (fuse->soc->lookups) {
+ size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
+
+ fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
+ if (!fuse->lookups)
+ return -ENOMEM;
+
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+ }
return 0;
}
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index be9424a87173..b8daaf5b7291 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -127,6 +128,70 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
#endif
#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct nvmem_cell_lookup tegra124_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "sata-calibration",
+ .dev_id = "70020000.sata",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-realignment",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "realignment",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ },
+};
+
static const struct tegra_fuse_info tegra124_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x300,
@@ -137,10 +202,81 @@ const struct tegra_fuse_soc tegra124_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra124_init_speedo_data,
.info = &tegra124_fuse_info,
+ .lookups = tegra124_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra124_fuse_lookups),
};
#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct nvmem_cell_lookup tegra210_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "sata-calibration",
+ .dev_id = "70020000.sata",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-calibration",
+ .dev_id = "57000000.gpu",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
static const struct tegra_fuse_info tegra210_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x300,
@@ -151,10 +287,26 @@ const struct tegra_fuse_soc tegra210_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra210_init_speedo_data,
.info = &tegra210_fuse_info,
+ .lookups = tegra210_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra210_fuse_lookups),
};
#endif
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct nvmem_cell_lookup tegra186_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
static const struct tegra_fuse_info tegra186_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x300,
@@ -164,5 +316,7 @@ static const struct tegra_fuse_info tegra186_fuse_info = {
const struct tegra_fuse_soc tegra186_fuse_soc = {
.init = tegra30_fuse_init,
.info = &tegra186_fuse_info,
+ .lookups = tegra186_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
};
#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 7230cb330503..0f74c2c34af0 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -13,6 +13,8 @@
#include <linux/dmaengine.h>
#include <linux/types.h>
+struct nvmem_cell_lookup;
+struct nvmem_device;
struct tegra_fuse;
struct tegra_fuse_info {
@@ -27,6 +29,9 @@ struct tegra_fuse_soc {
int (*probe)(struct tegra_fuse *fuse);
const struct tegra_fuse_info *info;
+
+ const struct nvmem_cell_lookup *lookups;
+ unsigned int num_lookups;
};
struct tegra_fuse {
@@ -48,6 +53,9 @@ struct tegra_fuse {
dma_addr_t phys;
u32 *virt;
} apbdma;
+
+ struct nvmem_device *nvmem;
+ struct nvmem_cell_lookup *lookups;
};
void tegra_init_revision(void);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 9f9c1c677cf4..ea0e11a09c12 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -56,8 +56,14 @@
#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_PWRREQ_POLARITY BIT(8)
#define PMC_CNTRL_MAIN_RST BIT(4)
+#define PMC_WAKE_MASK 0x0c
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+
#define DPD_SAMPLE 0x020
#define DPD_SAMPLE_ENABLE BIT(0)
#define DPD_SAMPLE_DISABLE (0 << 0)
@@ -82,11 +88,18 @@
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_COREPWRGOOD_TIMER 0x3c
+#define PMC_COREPWROFF_TIMER 0xe0
#define PMC_PWR_DET_VALUE 0xe4
#define PMC_SCRATCH41 0x140
+#define PMC_WAKE2_MASK 0x160
+#define PMC_WAKE2_LEVEL 0x164
+#define PMC_WAKE2_STATUS 0x168
+#define PMC_SW_WAKE2_STATUS 0x16c
+
#define PMC_SENSOR_CTRL 0x1b0
#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
@@ -226,6 +239,8 @@ struct tegra_pmc_soc {
void (*setup_irq_polarity)(struct tegra_pmc *pmc,
struct device_node *np,
bool invert);
+ int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+ int (*irq_set_type)(struct irq_data *data, unsigned int type);
const char * const *reset_sources;
unsigned int num_reset_sources;
@@ -309,6 +324,7 @@ static const char * const tegra210_reset_sources[] = {
* @pctl_dev: pin controller exposed by the PMC
* @domain: IRQ domain provided by the PMC
* @irq: chip implementation for the IRQ domain
+ * @clk_nb: pclk clock changes handler
*/
struct tegra_pmc {
struct device *dev;
@@ -344,6 +360,8 @@ struct tegra_pmc {
struct irq_domain *domain;
struct irq_chip irq;
+
+ struct notifier_block clk_nb;
};
static struct tegra_pmc *pmc = &(struct tegra_pmc) {
@@ -1192,7 +1210,7 @@ static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id,
return err;
if (pmc->clk) {
- rate = clk_get_rate(pmc->clk);
+ rate = pmc->rate;
if (!rate) {
dev_err(pmc->dev, "failed to get clock rate\n");
return -ENODEV;
@@ -1433,6 +1451,7 @@ void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
{
unsigned long long rate = 0;
+ u64 ticks;
u32 value;
switch (mode) {
@@ -1441,7 +1460,7 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
break;
case TEGRA_SUSPEND_LP2:
- rate = clk_get_rate(pmc->clk);
+ rate = pmc->rate;
break;
default:
@@ -1451,21 +1470,13 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
if (WARN_ON_ONCE(rate == 0))
rate = 100000000;
- if (rate != pmc->rate) {
- u64 ticks;
-
- ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
- do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
+ ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
- ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
- do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
-
- wmb();
-
- pmc->rate = rate;
- }
+ ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
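+
+ /*
+ * Worked example with illustrative values: cpu_good_time = 2000 us
+ * at a 100 MHz pclk gives ticks = (2000 * 100000000 + 999999) /
+ * 1000000 = 200000, i.e. the microsecond value rounded up to whole
+ * pclk ticks by the two conversions above.
+ */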
value = tegra_pmc_readl(pmc, PMC_CNTRL);
value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
@@ -1899,6 +1910,20 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
event->id,
&pmc->irq, pmc);
+ /*
+ * GPIOs don't have an equivalent interrupt in the
+ * parent controller (GIC). However, some code,
+ * such as irq_get_irqchip_state(), requires a
+ * valid IRQ chip to be set. Make sure that's the
+ * case by passing NULL here, which will install a
+ * dummy IRQ chip for the interrupt in the parent
+ * domain.
+ */
+ if (domain->parent)
+ irq_domain_set_hwirq_and_chip(domain->parent,
+ virq, 0, NULL,
+ NULL);
+
break;
}
}
@@ -1908,10 +1933,22 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
* dummy hardware IRQ number. This is used in the ->irq_set_type()
* and ->irq_set_wake() callbacks to return early for these IRQs.
*/
- if (i == soc->num_wake_events)
+ if (i == soc->num_wake_events) {
err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
&pmc->irq, pmc);
+ /*
+ * Interrupts without a wake event don't have a corresponding
+ * interrupt in the parent controller (GIC). Pass NULL for the
+ * chip here, which causes a dummy IRQ chip to be installed
+ * for the interrupt in the parent domain, to make this
+ * explicit.
+ */
+ if (domain->parent)
+ irq_domain_set_hwirq_and_chip(domain->parent, virq, 0,
+ NULL, NULL);
+ }
+
return err;
}
@@ -1920,7 +1957,87 @@ static const struct irq_domain_ops tegra_pmc_irq_domain_ops = {
.alloc = tegra_pmc_irq_alloc,
};
-static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
+static int tegra210_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
+
+ if (data->hwirq == ULONG_MAX)
+ return 0;
+
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ /* clear wake status */
+ tegra_pmc_writel(pmc, 0, PMC_SW_WAKE_STATUS);
+ tegra_pmc_writel(pmc, 0, PMC_SW_WAKE2_STATUS);
+
+ tegra_pmc_writel(pmc, 0, PMC_WAKE_STATUS);
+ tegra_pmc_writel(pmc, 0, PMC_WAKE2_STATUS);
+
+ /* enable PMC wake */
+ if (data->hwirq >= 32)
+ offset = PMC_WAKE2_MASK;
+ else
+ offset = PMC_WAKE_MASK;
+
+ value = tegra_pmc_readl(pmc, offset);
+
+ if (on)
+ value |= BIT(bit);
+ else
+ value &= ~BIT(bit);
+
+ tegra_pmc_writel(pmc, value, offset);
+
+ return 0;
+}
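+
+/*
+ * For illustration: hwirq 35 gives bit = 35 % 32 = 3 and, because
+ * hwirq >= 32, the PMC_WAKE2_MASK register is selected, so wake
+ * event 35 is controlled by bit 3 of PMC_WAKE2_MASK. The level
+ * registers in tegra210_pmc_irq_set_type() below split the same way.
+ */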
+
+static int tegra210_pmc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
+
+ if (data->hwirq == ULONG_MAX)
+ return 0;
+
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ if (data->hwirq >= 32)
+ offset = PMC_WAKE2_LEVEL;
+ else
+ offset = PMC_WAKE_LEVEL;
+
+ value = tegra_pmc_readl(pmc, offset);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ value |= BIT(bit);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ value &= ~BIT(bit);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING:
+ value ^= BIT(bit);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tegra_pmc_writel(pmc, value, offset);
+
+ return 0;
+}
+
+static int tegra186_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
unsigned int offset, bit;
@@ -1952,7 +2069,7 @@ static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static int tegra_pmc_irq_set_type(struct irq_data *data, unsigned int type)
+static int tegra186_pmc_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
u32 value;
@@ -2006,8 +2123,8 @@ static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
pmc->irq.irq_unmask = irq_chip_unmask_parent;
pmc->irq.irq_eoi = irq_chip_eoi_parent;
pmc->irq.irq_set_affinity = irq_chip_set_affinity_parent;
- pmc->irq.irq_set_type = tegra_pmc_irq_set_type;
- pmc->irq.irq_set_wake = tegra_pmc_irq_set_wake;
+ pmc->irq.irq_set_type = pmc->soc->irq_set_type;
+ pmc->irq.irq_set_wake = pmc->soc->irq_set_wake;
pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node,
&tegra_pmc_irq_domain_ops, pmc);
@@ -2019,6 +2136,33 @@ static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
return 0;
}
+static int tegra_pmc_clk_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct tegra_pmc *pmc = container_of(nb, struct tegra_pmc, clk_nb);
+ struct clk_notifier_data *data = ptr;
+
+ switch (action) {
+ case PRE_RATE_CHANGE:
+ mutex_lock(&pmc->powergates_lock);
+ break;
+
+ case POST_RATE_CHANGE:
+ pmc->rate = data->new_rate;
+ /* fall through */
+
+ case ABORT_RATE_CHANGE:
+ mutex_unlock(&pmc->powergates_lock);
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ return notifier_from_errno(-EINVAL);
+ }
+
+ return NOTIFY_OK;
+}
+
static int tegra_pmc_probe(struct platform_device *pdev)
{
void __iomem *base;
@@ -2082,6 +2226,23 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->clk = NULL;
}
+ /*
+ * The PCLK clock rate can't be retrieved using the CLK API because
+ * doing so causes a lockup if the CPU enters the LP2 idle state from
+ * some other CLK notifier, hence we cache the rate locally.
+ */
+ if (pmc->clk) {
+ pmc->clk_nb.notifier_call = tegra_pmc_clk_notify_cb;
+ err = clk_notifier_register(pmc->clk, &pmc->clk_nb);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register clk notifier\n");
+ return err;
+ }
+
+ pmc->rate = clk_get_rate(pmc->clk);
+ }
+
pmc->dev = &pdev->dev;
tegra_pmc_init(pmc);
@@ -2133,6 +2294,8 @@ cleanup_debugfs:
cleanup_sysfs:
device_remove_file(&pdev->dev, &dev_attr_reset_reason);
device_remove_file(&pdev->dev, &dev_attr_reset_level);
+ clk_notifier_unregister(pmc->clk, &pmc->clk_nb);
+
return err;
}
@@ -2184,7 +2347,7 @@ static const struct tegra_pmc_regs tegra20_pmc_regs = {
static void tegra20_pmc_init(struct tegra_pmc *pmc)
{
- u32 value;
+ u32 value, osc, pmu, off;
/* Always enable CPU power request */
value = tegra_pmc_readl(pmc, PMC_CNTRL);
@@ -2198,6 +2361,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
else
value |= PMC_CNTRL_SYSCLK_POLARITY;
+ if (pmc->corereq_high)
+ value &= ~PMC_CNTRL_PWRREQ_POLARITY;
+ else
+ value |= PMC_CNTRL_PWRREQ_POLARITY;
+
/* configure the output polarity while the request is tristated */
tegra_pmc_writel(pmc, value, PMC_CNTRL);
@@ -2205,6 +2373,16 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_SYSCLK_OE;
tegra_pmc_writel(pmc, value, PMC_CNTRL);
+
+ /* program core timings which are applicable only for suspend state */
+ if (pmc->suspend_mode != TEGRA_SUSPEND_NONE) {
+ osc = DIV_ROUND_UP(pmc->core_osc_time * 8192, 1000000);
+ pmu = DIV_ROUND_UP(pmc->core_pmu_time * 32768, 1000000);
+ off = DIV_ROUND_UP(pmc->core_off_time * 32768, 1000000);
+ tegra_pmc_writel(pmc, ((osc << 8) & 0xff00) | (pmu & 0xff),
+ PMC_COREPWRGOOD_TIMER);
+ tegra_pmc_writel(pmc, off, PMC_COREPWROFF_TIMER);
+ }
}
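+
+/*
+ * Worked example for the core timing conversion in tegra20_pmc_init()
+ * above, with illustrative values: core_osc_time = 1000 us gives
+ * osc = DIV_ROUND_UP(1000 * 8192, 1000000) = 9, i.e. the time is
+ * expressed in 8.192 kHz ticks (32.768 kHz for the pmu/off fields),
+ * rounded up, before being packed into the timer registers.
+ */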
static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -2538,6 +2716,10 @@ static const struct pinctrl_pin_desc tegra210_pin_descs[] = {
TEGRA210_IO_PAD_TABLE(TEGRA_IO_PIN_DESC)
};
+static const struct tegra_wake_event tegra210_wake_events[] = {
+ TEGRA_WAKE_IRQ("rtc", 16, 2),
+};
+
static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
@@ -2555,10 +2737,14 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra210_pmc_irq_set_wake,
+ .irq_set_type = tegra210_pmc_irq_set_type,
.reset_sources = tegra210_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra210_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .num_wake_events = ARRAY_SIZE(tegra210_wake_events),
+ .wake_events = tegra210_wake_events,
};
#define TEGRA186_IO_PAD_TABLE(_pad) \
@@ -2618,7 +2804,7 @@ static const struct tegra_pmc_regs tegra186_pmc_regs = {
.dpd2_status = 0x80,
.rst_status = 0x70,
.rst_source_shift = 0x2,
- .rst_source_mask = 0x3C,
+ .rst_source_mask = 0x3c,
.rst_level_shift = 0x0,
.rst_level_mask = 0x3,
};
@@ -2680,6 +2866,8 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.regs = &tegra186_pmc_regs,
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
.reset_sources = tegra186_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra186_reset_sources),
.reset_levels = tegra186_reset_levels,
@@ -2738,6 +2926,43 @@ static const struct tegra_io_pad_soc tegra194_io_pads[] = {
{ .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = UINT_MAX },
};
+static const struct tegra_pmc_regs tegra194_pmc_regs = {
+ .scratch0 = 0x2000,
+ .dpd_req = 0x74,
+ .dpd_status = 0x78,
+ .dpd2_req = 0x7c,
+ .dpd2_status = 0x80,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0x7c,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra194_reset_sources[] = {
+ "SYS_RESET_N",
+ "AOWDT",
+ "BCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "LCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "MAINSWRST",
+ "SC7",
+ "HSM",
+ "CSITE",
+ "RCEWDT",
+ "PVA0WDT",
+ "PVA1WDT",
+ "L1A_ASYNC",
+ "BPMPBOOT",
+ "FUSECRC",
+};
+
static const struct tegra_wake_event tegra194_wake_events[] = {
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
@@ -2755,9 +2980,15 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
- .regs = &tegra186_pmc_regs,
+ .regs = &tegra194_pmc_regs,
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra194_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra194_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra194_wake_events),
.wake_events = tegra194_wake_events,
};
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
new file mode 100644
index 000000000000..ea0eede48802
--- /dev/null
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Voltage regulators coupler for NVIDIA Tegra20
+ * Copyright (C) 2019 GRATE-DRIVER project
+ *
+ * Voltage constraints borrowed from downstream kernel sources
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ */
+
+#define pr_fmt(fmt) "tegra voltage-coupler: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+struct tegra_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *core_rdev;
+ struct regulator_dev *cpu_rdev;
+ struct regulator_dev *rtc_rdev;
+ int core_min_uV;
+};
+
+static inline struct tegra_regulator_coupler *
+to_tegra_coupler(struct regulator_coupler *coupler)
+{
+ return container_of(coupler, struct tegra_regulator_coupler, coupler);
+}
+
+static int tegra20_core_limit(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV = 0;
+ int core_max_uV;
+ int core_cur_uV;
+ int err;
+
+ if (tegra->core_min_uV > 0)
+ return tegra->core_min_uV;
+
+ core_cur_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_cur_uV < 0)
+ return core_cur_uV;
+
+ core_max_uV = max(core_cur_uV, 1200000);
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /*
+ * Limit the minimum CORE voltage to the value left by the bootloader
+ * or, if that value is unreasonably low, to the most common 1.2V or
+ * to whatever maximum value is defined via the board's device-tree.
+ */
+ tegra->core_min_uV = core_max_uV;
+
+ pr_info("core minimum voltage limited to %duV\n", tegra->core_min_uV);
+
+ return tegra->core_min_uV;
+}
+
+static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ struct coupling_desc *c_desc = &core_rdev->coupling_desc;
+ struct regulator_dev *rdev;
+ int max_spread;
+ unsigned int i;
+
+ for (i = 1; i < c_desc->n_coupled; i++) {
+ max_spread = core_rdev->constraints->max_spread[i - 1];
+ rdev = c_desc->coupled_rdevs[i];
+
+ if (rdev == rtc_rdev && max_spread)
+ return max_spread;
+ }
+
+ pr_err_once("rtc-core max-spread is undefined in device-tree\n");
+
+ return 150000;
+}
+
+static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev,
+ int cpu_uV, int cpu_min_uV)
+{
+ int core_min_uV, core_max_uV = INT_MAX;
+ int rtc_min_uV, rtc_max_uV = INT_MAX;
+ int core_target_uV;
+ int rtc_target_uV;
+ int max_spread;
+ int core_uV;
+ int rtc_uV;
+ int err;
+
+ /*
+ * RTC and CORE voltages should be no more than 170mV apart, and the
+ * CPU should stay below both RTC and CORE by at least 120mV. This
+ * applies to all Tegra20 SoCs.
+ */
+ max_spread = tegra20_core_rtc_max_spread(core_rdev, rtc_rdev);
+
+ /*
+ * The core voltage scaling is currently not hooked up in drivers,
+ * hence we will limit the minimum core voltage to a reasonable value.
+ * This should be good enough for the time being.
+ */
+ core_min_uV = tegra20_core_limit(tegra, core_rdev);
+ if (core_min_uV < 0)
+ return core_min_uV;
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(core_rdev, &core_min_uV, &core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_uV < 0)
+ return core_uV;
+
+ core_min_uV = max(cpu_min_uV + 125000, core_min_uV);
+ if (core_min_uV > core_max_uV)
+ return -EINVAL;
+
+ if (cpu_uV + 120000 > core_uV)
+ pr_err("core-cpu voltage constraint violated: %d %d\n",
+ core_uV, cpu_uV + 120000);
+
+ rtc_uV = regulator_get_voltage_rdev(rtc_rdev);
+ if (rtc_uV < 0)
+ return rtc_uV;
+
+ if (cpu_uV + 120000 > rtc_uV)
+ pr_err("rtc-cpu voltage constraint violated: %d %d\n",
+ rtc_uV, cpu_uV + 120000);
+
+ if (abs(core_uV - rtc_uV) > 170000)
+ pr_err("core-rtc voltage constraint violated: %d %d\n",
+ core_uV, rtc_uV);
+
+ rtc_min_uV = max(cpu_min_uV + 125000, core_min_uV - max_spread);
+
+ err = regulator_check_voltage(rtc_rdev, &rtc_min_uV, &rtc_max_uV);
+ if (err)
+ return err;
+
+ while (core_uV != core_min_uV || rtc_uV != rtc_min_uV) {
+ if (core_uV < core_min_uV) {
+ core_target_uV = min(core_uV + max_spread, core_min_uV);
+ core_target_uV = min(rtc_uV + max_spread, core_target_uV);
+ } else {
+ core_target_uV = max(core_uV - max_spread, core_min_uV);
+ core_target_uV = max(rtc_uV - max_spread, core_target_uV);
+ }
+
+ err = regulator_set_voltage_rdev(core_rdev,
+ core_target_uV,
+ core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = core_target_uV;
+
+ if (rtc_uV < rtc_min_uV) {
+ rtc_target_uV = min(rtc_uV + max_spread, rtc_min_uV);
+ rtc_target_uV = min(core_uV + max_spread, rtc_target_uV);
+ } else {
+ rtc_target_uV = max(rtc_uV - max_spread, rtc_min_uV);
+ rtc_target_uV = max(core_uV - max_spread, rtc_target_uV);
+ }
+
+ err = regulator_set_voltage_rdev(rtc_rdev,
+ rtc_target_uV,
+ rtc_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ rtc_uV = rtc_target_uV;
+ }
+
+ return 0;
+}
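+
+/*
+ * Illustrative walk-through of the loop above: with CORE and RTC both
+ * at 1100000 uV, core_min_uV = 1300000 and max_spread = 150000, the
+ * first iteration raises CORE to min(1100000 + 150000, 1300000) =
+ * 1250000 uV and pulls RTC up behind it; the second iteration
+ * completes the move to 1300000 uV, so the two rails never drift
+ * apart by more than max_spread at any point.
+ */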
+
+static int tegra20_core_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ int cpu_uV;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ return tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_uV);
+}
+
+static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ int cpu_min_uV_consumers = 0;
+ int cpu_max_uV = INT_MAX;
+ int cpu_min_uV = 0;
+ int cpu_uV;
+ int err;
+
+ err = regulator_check_voltage(cpu_rdev, &cpu_min_uV, &cpu_max_uV);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV, &cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV_consumers,
+ &cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ /*
+ * The CPU's regulator may not have any consumers, in which case the
+ * voltage must not be changed, because the CPU simply won't survive
+ * the voltage drop if it's running at a higher frequency.
+ */
+ if (!cpu_min_uV_consumers)
+ cpu_min_uV = cpu_uV;
+
+ if (cpu_min_uV > cpu_uV) {
+ err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_min_uV);
+ if (err)
+ return err;
+
+ err = regulator_set_voltage_rdev(cpu_rdev, cpu_min_uV,
+ cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+ } else if (cpu_min_uV < cpu_uV) {
+ err = regulator_set_voltage_rdev(cpu_rdev, cpu_min_uV,
+ cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_min_uV);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct regulator_dev *core_rdev = tegra->core_rdev;
+ struct regulator_dev *cpu_rdev = tegra->cpu_rdev;
+ struct regulator_dev *rtc_rdev = tegra->rtc_rdev;
+
+ if ((core_rdev != rdev && cpu_rdev != rdev && rtc_rdev != rdev) ||
+ state != PM_SUSPEND_ON) {
+ pr_err("regulators are not coupled properly\n");
+ return -EINVAL;
+ }
+
+ if (rdev == cpu_rdev)
+ return tegra20_cpu_voltage_update(tegra, cpu_rdev,
+ core_rdev, rtc_rdev);
+
+ if (rdev == core_rdev)
+ return tegra20_core_voltage_update(tegra, cpu_rdev,
+ core_rdev, rtc_rdev);
+
+ pr_err("changing %s voltage not permitted\n", rdev_get_name(rtc_rdev));
+
+ return -EPERM;
+}
+
+static int tegra20_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct device_node *np = rdev->dev.of_node;
+
+ if (of_property_read_bool(np, "nvidia,tegra-core-regulator") &&
+ !tegra->core_rdev) {
+ tegra->core_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-rtc-regulator") &&
+ !tegra->rtc_rdev) {
+ tegra->rtc_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-cpu-regulator") &&
+ !tegra->cpu_rdev) {
+ tegra->cpu_rdev = rdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra20_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+
+ if (tegra->core_rdev == rdev) {
+ tegra->core_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->rtc_rdev == rdev) {
+ tegra->rtc_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->cpu_rdev == rdev) {
+ tegra->cpu_rdev = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct tegra_regulator_coupler tegra20_coupler = {
+ .coupler = {
+ .attach_regulator = tegra20_regulator_attach,
+ .detach_regulator = tegra20_regulator_detach,
+ .balance_voltage = tegra20_regulator_balance_voltage,
+ },
+};
+
+static int __init tegra_regulator_coupler_init(void)
+{
+ if (!of_machine_is_compatible("nvidia,tegra20"))
+ return 0;
+
+ return regulator_coupler_register(&tegra20_coupler.coupler);
+}
+arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
new file mode 100644
index 000000000000..8e623ff18e70
--- /dev/null
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Voltage regulators coupler for NVIDIA Tegra30
+ * Copyright (C) 2019 GRATE-DRIVER project
+ *
+ * Voltage constraints borrowed from downstream kernel sources
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ */
+
+#define pr_fmt(fmt) "tegra voltage-coupler: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <soc/tegra/fuse.h>
+
+struct tegra_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *core_rdev;
+ struct regulator_dev *cpu_rdev;
+ int core_min_uV;
+};
+
+static inline struct tegra_regulator_coupler *
+to_tegra_coupler(struct regulator_coupler *coupler)
+{
+ return container_of(coupler, struct tegra_regulator_coupler, coupler);
+}
+
+static int tegra30_core_limit(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV = 0;
+ int core_max_uV;
+ int core_cur_uV;
+ int err;
+
+ if (tegra->core_min_uV > 0)
+ return tegra->core_min_uV;
+
+ core_cur_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_cur_uV < 0)
+ return core_cur_uV;
+
+ core_max_uV = max(core_cur_uV, 1200000);
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /*
+ * Limit the minimum CORE voltage to the value left by the bootloader
+ * or, if that value is unreasonably low, to the most common 1.2V or
+ * to whatever maximum value is defined via the board's device-tree.
+ */
+ tegra->core_min_uV = core_max_uV;
+
+ pr_info("core minimum voltage limited to %duV\n", tegra->core_min_uV);
+
+ return tegra->core_min_uV;
+}
+
+static int tegra30_core_cpu_limit(int cpu_uV)
+{
+ if (cpu_uV < 800000)
+ return 950000;
+
+ if (cpu_uV < 900000)
+ return 1000000;
+
+ if (cpu_uV < 1000000)
+ return 1100000;
+
+ if (cpu_uV < 1100000)
+ return 1200000;
+
+ if (cpu_uV < 1250000) {
+ switch (tegra_sku_info.cpu_speedo_id) {
+ case 0 ... 1:
+ case 4:
+ case 7 ... 8:
+ return 1200000;
+
+ default:
+ return 1300000;
+ }
+ }
+
+ return -EINVAL;
+}
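+
+/*
+ * For illustration: cpu_uV = 950000 falls through the first two checks
+ * and matches (cpu_uV < 1000000), so the CORE floor is 1100000 uV;
+ * above 1250000 uV no limit is defined and -EINVAL is returned.
+ */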
+
+static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV, core_max_uV = INT_MAX;
+ int cpu_min_uV, cpu_max_uV = INT_MAX;
+ int cpu_min_uV_consumers = 0;
+ int core_min_limited_uV;
+ int core_target_uV;
+ int cpu_target_uV;
+ int core_max_step;
+ int cpu_max_step;
+ int max_spread;
+ int core_uV;
+ int cpu_uV;
+ int err;
+
+ /*
+ * The CPU voltage must not be more than 300mV below the CORE voltage,
+ * and must stay below the CORE by at least 100mV, with the exact
+ * margin depending on the CORE voltage. This applies to all Tegra30
+ * SoCs.
+ */
+ max_spread = cpu_rdev->constraints->max_spread[0];
+ cpu_max_step = cpu_rdev->constraints->max_uV_step;
+ core_max_step = core_rdev->constraints->max_uV_step;
+
+ if (!max_spread) {
+ pr_err_once("cpu-core max-spread is undefined in device-tree\n");
+ max_spread = 300000;
+ }
+
+ if (!cpu_max_step) {
+ pr_err_once("cpu max-step is undefined in device-tree\n");
+ cpu_max_step = 150000;
+ }
+
+ if (!core_max_step) {
+ pr_err_once("core max-step is undefined in device-tree\n");
+ core_max_step = 150000;
+ }
+
+ /*
+ * The CORE voltage scaling is currently not hooked up in drivers,
+ * hence we will limit the minimum CORE voltage to a reasonable value.
+ * This should be good enough for the time being.
+ */
+ core_min_uV = tegra30_core_limit(tegra, core_rdev);
+ if (core_min_uV < 0)
+ return core_min_uV;
+
+ err = regulator_check_consumers(core_rdev, &core_min_uV, &core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_uV < 0)
+ return core_uV;
+
+ cpu_min_uV = core_min_uV - max_spread;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV, &cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV_consumers,
+ &cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_voltage(cpu_rdev, &cpu_min_uV, &cpu_max_uV);
+ if (err)
+ return err;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ /*
+ * The CPU's regulator may not have any consumers, in which case the
+ * voltage must not be changed, because the CPU simply won't survive
+ * the voltage drop if it's running at a higher frequency.
+ */
+ if (!cpu_min_uV_consumers)
+ cpu_min_uV = cpu_uV;
+
+ /*
+ * The bootloader should have set up the voltages correctly, but if a
+ * constraint violation is detected, then try to fix it first.
+ */
+ core_min_limited_uV = tegra30_core_cpu_limit(cpu_uV);
+ if (core_min_limited_uV < 0)
+ return core_min_limited_uV;
+
+ core_min_uV = max(core_min_uV, tegra30_core_cpu_limit(cpu_min_uV));
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ if (core_min_limited_uV > core_uV) {
+ pr_err("core voltage constraint violated: %d %d %d\n",
+ core_uV, core_min_limited_uV, cpu_uV);
+ goto update_core;
+ }
+
+ while (cpu_uV != cpu_min_uV || core_uV != core_min_uV) {
+ if (cpu_uV < cpu_min_uV) {
+ cpu_target_uV = min(cpu_uV + cpu_max_step, cpu_min_uV);
+ } else {
+ cpu_target_uV = max(cpu_uV - cpu_max_step, cpu_min_uV);
+ cpu_target_uV = max(core_uV - max_spread, cpu_target_uV);
+ }
+
+ err = regulator_set_voltage_rdev(cpu_rdev,
+ cpu_target_uV,
+ cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ cpu_uV = cpu_target_uV;
+update_core:
+ core_min_limited_uV = tegra30_core_cpu_limit(cpu_uV);
+ if (core_min_limited_uV < 0)
+ return core_min_limited_uV;
+
+ core_target_uV = max(core_min_limited_uV, core_min_uV);
+
+ if (core_uV < core_target_uV) {
+ core_target_uV = min(core_target_uV, core_uV + core_max_step);
+ core_target_uV = min(core_target_uV, cpu_uV + max_spread);
+ } else {
+ core_target_uV = max(core_target_uV, core_uV - core_max_step);
+ }
+
+ err = regulator_set_voltage_rdev(core_rdev,
+ core_target_uV,
+ core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = core_target_uV;
+ }
+
+ return 0;
+}
+
+static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct regulator_dev *core_rdev = tegra->core_rdev;
+ struct regulator_dev *cpu_rdev = tegra->cpu_rdev;
+
+ if ((core_rdev != rdev && cpu_rdev != rdev) || state != PM_SUSPEND_ON) {
+ pr_err("regulators are not coupled properly\n");
+ return -EINVAL;
+ }
+
+ return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
+}
+
+static int tegra30_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct device_node *np = rdev->dev.of_node;
+
+ if (of_property_read_bool(np, "nvidia,tegra-core-regulator") &&
+ !tegra->core_rdev) {
+ tegra->core_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-cpu-regulator") &&
+ !tegra->cpu_rdev) {
+ tegra->cpu_rdev = rdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra30_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+
+ if (tegra->core_rdev == rdev) {
+ tegra->core_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->cpu_rdev == rdev) {
+ tegra->cpu_rdev = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct tegra_regulator_coupler tegra30_coupler = {
+ .coupler = {
+ .attach_regulator = tegra30_regulator_attach,
+ .detach_regulator = tegra30_regulator_detach,
+ .balance_voltage = tegra30_regulator_balance_voltage,
+ },
+};
+
+static int __init tegra_regulator_coupler_init(void)
+{
+ if (!of_machine_is_compatible("nvidia,tegra30"))
+ return 0;
+
+ return regulator_coupler_register(&tegra30_coupler.coupler);
+}
+arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index b3868d392d4f..788b5cd1e180 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
obj-$(CONFIG_AMX3_PM) += pm33xx.o
+obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
new file mode 100644
index 000000000000..96c6f777519c
--- /dev/null
+++ b/drivers/soc/ti/omap_prm.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP2+ PRM driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+
+#include <linux/platform_data/ti-prm.h>
+
+struct omap_rst_map {
+ s8 rst;
+ s8 st;
+};
+
+struct omap_prm_data {
+ u32 base;
+ const char *name;
+ const char *clkdm_name;
+ u16 rstctrl;
+ u16 rstst;
+ const struct omap_rst_map *rstmap;
+ u8 flags;
+};
+
+struct omap_prm {
+ const struct omap_prm_data *data;
+ void __iomem *base;
+};
+
+struct omap_reset_data {
+ struct reset_controller_dev rcdev;
+ struct omap_prm *prm;
+ u32 mask;
+ spinlock_t lock;
+ struct clockdomain *clkdm;
+ struct device *dev;
+};
+
+#define to_omap_reset_data(p) container_of((p), struct omap_reset_data, rcdev)
+
+#define OMAP_MAX_RESETS 8
+#define OMAP_RESET_MAX_WAIT 10000
+
+#define OMAP_PRM_HAS_RSTCTRL BIT(0)
+#define OMAP_PRM_HAS_RSTST BIT(1)
+#define OMAP_PRM_HAS_NO_CLKDM BIT(2)
+
+#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
+
+static const struct omap_rst_map rst_map_0[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map rst_map_01[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = 1, .st = 1 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map rst_map_012[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = 1, .st = 1 },
+ { .rst = 2, .st = 2 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data omap4_prm_data[] = {
+ { .name = "tesla", .base = 0x4a306400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "core", .base = 0x4a306700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati", .rstmap = rst_map_012 },
+ { .name = "ivahd", .base = 0x4a306f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
+ { .name = "device", .base = 0x4a307b00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { },
+};
+
+static const struct omap_prm_data omap5_prm_data[] = {
+ { .name = "dsp", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu", .rstmap = rst_map_012 },
+ { .name = "iva", .base = 0x4ae07200, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
+ { .name = "device", .base = 0x4ae07c00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { },
+};
+
+static const struct omap_prm_data dra7_prm_data[] = {
+ { .name = "dsp1", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "ipu", .base = 0x4ae06500, .rstctrl = 0x10, .rstst = 0x14, .clkdm_name = "ipu1", .rstmap = rst_map_012 },
+ { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu2", .rstmap = rst_map_012 },
+ { .name = "iva", .base = 0x4ae06f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
+ { .name = "dsp2", .base = 0x4ae07b00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve1", .base = 0x4ae07b40, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve2", .base = 0x4ae07b80, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve3", .base = 0x4ae07bc0, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve4", .base = 0x4ae07c00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { },
+};
+
+static const struct omap_rst_map am3_per_rst_map[] = {
+ { .rst = 1 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map am3_wkup_rst_map[] = {
+ { .rst = 3, .st = 5 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data am3_prm_data[] = {
+ { .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
+ { .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { .name = "gfx", .base = 0x44e01100, .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3" },
+ { },
+};
+
+static const struct omap_rst_map am4_per_rst_map[] = {
+ { .rst = 1, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map am4_device_rst_map[] = {
+ { .rst = 0, .st = 1 },
+ { .rst = 1, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data am4_prm_data[] = {
+ { .name = "gfx", .base = 0x44df0400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3" },
+ { .name = "per", .base = 0x44df0800, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map, .clkdm_name = "pruss_ocp" },
+ { .name = "wkup", .base = 0x44df2000, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_NO_CLKDM },
+ { .name = "device", .base = 0x44df4000, .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { },
+};
+
+static const struct of_device_id omap_prm_id_table[] = {
+ { .compatible = "ti,omap4-prm-inst", .data = omap4_prm_data },
+ { .compatible = "ti,omap5-prm-inst", .data = omap5_prm_data },
+ { .compatible = "ti,dra7-prm-inst", .data = dra7_prm_data },
+ { .compatible = "ti,am3-prm-inst", .data = am3_prm_data },
+ { .compatible = "ti,am4-prm-inst", .data = am4_prm_data },
+ { },
+};
+
+static bool _is_valid_reset(struct omap_reset_data *reset, unsigned long id)
+{
+ if (reset->mask & BIT(id))
+ return true;
+
+ return false;
+}
+
+static int omap_reset_get_st_bit(struct omap_reset_data *reset,
+ unsigned long id)
+{
+ const struct omap_rst_map *map = reset->prm->data->rstmap;
+
+ while (map->rst >= 0) {
+ if (map->rst == id)
+ return map->st;
+
+ map++;
+ }
+
+ return id;
+}
+
+static int omap_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ int st_bit = omap_reset_get_st_bit(reset, id);
+ bool has_rstst = reset->prm->data->rstst ||
+ (reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
+
+ /* Check if we have rstst */
+ if (!has_rstst)
+ return -ENOTSUPP;
+
+ /* Check if hw reset line is asserted */
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if (v & BIT(id))
+ return 1;
+
+ /*
+ * Check the reset status; a high value means the reset sequence has
+ * completed successfully, so we can return 0 here (reset deasserted).
+ */
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstst);
+ v >>= st_bit;
+ v &= 1;
+
+ return !v;
+}
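+
+/*
+ * For illustration: with rst_map_01, reset id 1 maps to status bit 1.
+ * A set rstctrl bit reports the line as asserted (returns 1);
+ * otherwise a set rstst bit means the reset sequence completed, hence
+ * the !v above returning 0 (deasserted).
+ */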
+
+static int omap_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ unsigned long flags;
+
+ /* assert the reset control line */
+ spin_lock_irqsave(&reset->lock, flags);
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ v |= 1 << id;
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int omap_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ int st_bit;
+ bool has_rstst;
+ unsigned long flags;
+ struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
+ int ret = 0;
+
+ has_rstst = reset->prm->data->rstst ||
+ (reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
+
+ if (has_rstst) {
+ st_bit = omap_reset_get_st_bit(reset, id);
+
+ /* Clear the reset status by writing 1 to the status bit */
+ v = 1 << st_bit;
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstst);
+ }
+
+ if (reset->clkdm)
+ pdata->clkdm_deny_idle(reset->clkdm);
+
+ /* de-assert the reset control line */
+ spin_lock_irqsave(&reset->lock, flags);
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ v &= ~(1 << id);
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ if (!has_rstst)
+ goto exit;
+
+ /* wait for the status to be set */
+ ret = readl_relaxed_poll_timeout(reset->prm->base +
+ reset->prm->data->rstst,
+ v, v & BIT(st_bit), 1,
+ OMAP_RESET_MAX_WAIT);
+ if (ret)
+ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
+
+exit:
+ if (reset->clkdm)
+ pdata->clkdm_allow_idle(reset->clkdm);
+
+ return ret;
+}
+
+static const struct reset_control_ops omap_reset_ops = {
+ .assert = omap_reset_assert,
+ .deassert = omap_reset_deassert,
+ .status = omap_reset_status,
+};
+
+static int omap_prm_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+
+ if (!_is_valid_reset(reset, reset_spec->args[0]))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+
+static int omap_prm_reset_init(struct platform_device *pdev,
+ struct omap_prm *prm)
+{
+ struct omap_reset_data *reset;
+ const struct omap_rst_map *map;
+ struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ char buf[32];
+
+ /*
+ * Check if we have controllable resets. If either rstctrl is non-zero
+ * or OMAP_PRM_HAS_RSTCTRL flag is set, we have reset control register
+ * for the domain.
+ */
+ if (!prm->data->rstctrl && !(prm->data->flags & OMAP_PRM_HAS_RSTCTRL))
+ return 0;
+
+ /* Check if we have the pdata callbacks in place */
+ if (!pdata || !pdata->clkdm_lookup || !pdata->clkdm_deny_idle ||
+ !pdata->clkdm_allow_idle)
+ return -EINVAL;
+
+ map = prm->data->rstmap;
+ if (!map)
+ return -EINVAL;
+
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.ops = &omap_reset_ops;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = OMAP_MAX_RESETS;
+ reset->rcdev.of_xlate = omap_prm_reset_xlate;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->dev = &pdev->dev;
+ spin_lock_init(&reset->lock);
+
+ reset->prm = prm;
+
+ sprintf(buf, "%s_clkdm", prm->data->clkdm_name ? prm->data->clkdm_name :
+ prm->data->name);
+
+ if (!(prm->data->flags & OMAP_PRM_HAS_NO_CLKDM)) {
+ reset->clkdm = pdata->clkdm_lookup(buf);
+ if (!reset->clkdm)
+ return -EINVAL;
+ }
+
+ while (map->rst >= 0) {
+ reset->mask |= BIT(map->rst);
+ map++;
+ }
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
+static int omap_prm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct omap_prm_data *data;
+ struct omap_prm *prm;
+ const struct of_device_id *match;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ match = of_match_device(omap_prm_id_table, &pdev->dev);
+ if (!match)
+ return -ENOTSUPP;
+
+ prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
+ if (!prm)
+ return -ENOMEM;
+
+ data = match->data;
+
+ while (data->base != res->start) {
+ if (!data->base)
+ return -EINVAL;
+ data++;
+ }
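+
+	/*
+	 * For illustration: probing the instance at 0x4ae06400 on dra7
+	 * walks dra7_prm_data[] until data->base matches res->start and
+	 * selects the "dsp1" entry; hitting the zero-base sentinel first
+	 * means an unknown instance and -EINVAL above.
+	 */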
+
+ prm->data = data;
+
+ prm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(prm->base))
+ return PTR_ERR(prm->base);
+
+ return omap_prm_reset_init(pdev, prm);
+}
+
+static struct platform_driver omap_prm_driver = {
+ .probe = omap_prm_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = omap_prm_id_table,
+ },
+};
+builtin_platform_driver(omap_prm_driver);
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 600f57cf0c2e..23d90cb12ba9 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -2,7 +2,7 @@
/*
* ZynqMP Generic PM domain support
*
- * Copyright (C) 2015-2018 Xilinx, Inc.
+ * Copyright (C) 2015-2019 Xilinx, Inc.
*
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
@@ -25,6 +25,8 @@
static const struct zynqmp_eemi_ops *eemi_ops;
+static int min_capability;
+
/**
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
@@ -106,7 +108,7 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
int ret;
struct pm_domain_data *pdd, *tmp;
struct zynqmp_pm_domain *pd;
- u32 capabilities = 0;
+ u32 capabilities = min_capability;
bool may_wakeup;
if (!eemi_ops->set_requirement)
@@ -283,6 +285,10 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
if (!domains)
return -ENOMEM;
+ if (!of_device_is_compatible(dev->parent->of_node,
+ "xlnx,zynqmp-firmware"))
+ min_capability = ZYNQMP_PM_CAPABILITY_UNUSABLE;
+
for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
pd->node_id = 0;
pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
diff --git a/drivers/staging/gasket/gasket_constants.h b/drivers/staging/gasket/gasket_constants.h
index 50d87c7b178c..9ea9c8833f27 100644
--- a/drivers/staging/gasket/gasket_constants.h
+++ b/drivers/staging/gasket/gasket_constants.h
@@ -13,9 +13,6 @@
/* The maximum devices per each type. */
#define GASKET_DEV_MAX 256
-/* The number of supported (and possible) PCI BARs. */
-#define GASKET_NUM_BARS 6
-
/* The number of supported Gasket page tables per device. */
#define GASKET_MAX_NUM_PAGE_TABLES 1
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index 13179f063a61..cd8be80d2076 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -371,7 +371,7 @@ static int gasket_setup_pci(struct pci_dev *pci_dev,
{
int i, mapped_bars, ret;
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
ret = gasket_map_pci_bar(gasket_dev, i);
if (ret) {
mapped_bars = i;
@@ -393,7 +393,7 @@ static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
{
int i;
- for (i = 0; i < GASKET_NUM_BARS; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
gasket_unmap_pci_bar(gasket_dev, i);
}
@@ -493,7 +493,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
(enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
switch (sysfs_type) {
case ATTR_BAR_OFFSETS:
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_desc = &driver_desc->bar_descriptions[i];
if (bar_desc->size == 0)
continue;
@@ -505,7 +505,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
}
break;
case ATTR_BAR_SIZES:
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_desc = &driver_desc->bar_descriptions[i];
if (bar_desc->size == 0)
continue;
@@ -556,7 +556,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
break;
case ATTR_USER_MEM_RANGES:
- for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
current_written =
gasket_write_mappable_regions(buf, driver_desc,
i);
@@ -736,7 +736,7 @@ static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
const struct gasket_driver_desc *driver_desc;
driver_desc = gasket_dev->internal_desc->driver_desc;
- for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
struct gasket_bar_desc bar_desc =
driver_desc->bar_descriptions[i];
diff --git a/drivers/staging/gasket/gasket_core.h b/drivers/staging/gasket/gasket_core.h
index be44ac1e3118..c417acadb0d5 100644
--- a/drivers/staging/gasket/gasket_core.h
+++ b/drivers/staging/gasket/gasket_core.h
@@ -268,7 +268,7 @@ struct gasket_dev {
char kobj_name[GASKET_NAME_MAX];
/* Virtual address of mapped BAR memory range. */
- struct gasket_bar_data bar_data[GASKET_NUM_BARS];
+ struct gasket_bar_data bar_data[PCI_STD_NUM_BARS];
/* Coherent buffer. */
struct gasket_coherent_buffer coherent_buffer;
@@ -369,7 +369,7 @@ struct gasket_driver_desc {
/* Set of 6 bar descriptions that describe all PCIe bars.
* Note that BUS/AXI devices (i.e., non-PCI devices) use those.
*/
- struct gasket_bar_desc bar_descriptions[GASKET_NUM_BARS];
+ struct gasket_bar_desc bar_descriptions[PCI_STD_NUM_BARS];
/*
* Coherent buffer description.
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 54bb1ebd8eb5..af35251232eb 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -297,7 +297,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct net_device *ndev = cdev->lldi.ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int ret, i;
if (!lldi->vr->iscsi.size) {
@@ -305,8 +304,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
return -EACCES;
}
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
-
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d19e051f2bc2..7251a87bb576 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1165,7 +1165,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2002,7 +2004,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -2189,24 +2193,22 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
goto empty_sendtargets;
}
- if (strncmp("SendTargets", text_in, 11) != 0) {
+ if (strncmp("SendTargets=", text_in, 12) != 0) {
pr_err("Received Text Data that is not"
" SendTargets, cannot continue.\n");
goto reject;
}
+ /* '=' confirmed in strncmp */
text_ptr = strchr(text_in, '=');
- if (!text_ptr) {
- pr_err("No \"=\" separator found in Text Data,"
- " cannot continue.\n");
- goto reject;
- }
- if (!strncmp("=All", text_ptr, 4)) {
+ BUG_ON(!text_ptr);
+ if (!strncmp("=All", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
- pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+ pr_err("Unable to locate valid SendTargets%s value\n",
+ text_ptr);
goto reject;
}
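
The length 5 in strncmp("=All", text_ptr, 5) is deliberate: it includes the terminating NUL, so only an exact "=All" value matches. A quick illustration:

    strncmp("=All", "=All",  5);  /* 0: the NUL is compared too */
    strncmp("=All", "=Allx", 5);  /* nonzero: rejected */
    strncmp("=All", "=Allx", 4);  /* 0: a 4-byte compare would wrongly match */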
@@ -4232,6 +4234,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 51ddca2033e0..0e54627d9aa8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,22 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
+static char *chap_get_digest_name(const int digest_type)
+{
+ switch (digest_type) {
+ case CHAP_DIGEST_MD5:
+ return "md5";
+ case CHAP_DIGEST_SHA1:
+ return "sha1";
+ case CHAP_DIGEST_SHA256:
+ return "sha256";
+ case CHAP_DIGEST_SHA3_256:
+ return "sha3-256";
+ default:
+ return NULL;
+ }
+}
+
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -25,16 +41,21 @@ static int chap_gen_challenge(
unsigned int *c_len)
{
int ret;
- unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ unsigned char *challenge_asciihex;
struct iscsi_chap *chap = conn->auth_protocol;
- memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+ challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL);
+ if (!challenge_asciihex)
+ return -ENOMEM;
- ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN);
+
+ ret = get_random_bytes_wait(chap->challenge, chap->challenge_len);
if (unlikely(ret))
- return ret;
+ goto out;
+
bin2hex(challenge_asciihex, chap->challenge,
- CHAP_CHALLENGE_LENGTH);
+ chap->challenge_len);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
@@ -43,12 +64,29 @@ static int chap_gen_challenge(
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
+
+out:
+ kfree(challenge_asciihex);
+ return ret;
+}
+
+static int chap_test_algorithm(const char *name)
+{
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(tfm))
+ return -1;
+
+ crypto_free_shash(tfm);
return 0;
}
static int chap_check_algorithm(const char *a_str)
{
- char *tmp, *orig, *token;
+ char *tmp, *orig, *token, *digest_name;
+ long digest_type;
+ int r = CHAP_DIGEST_UNKNOWN;
tmp = kstrdup(a_str, GFP_KERNEL);
if (!tmp) {
@@ -70,15 +108,24 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
- if (!strncmp(token, "5", 1)) {
- pr_debug("Selected MD5 Algorithm\n");
- kfree(orig);
- return CHAP_DIGEST_MD5;
+ if (kstrtol(token, 10, &digest_type))
+ continue;
+
+ digest_name = chap_get_digest_name(digest_type);
+ if (!digest_name)
+ continue;
+
+ pr_debug("Selected %s Algorithm\n", digest_name);
+ if (chap_test_algorithm(digest_name) < 0) {
+ pr_err("failed to allocate %s algo\n", digest_name);
+ } else {
+ r = digest_type;
+ goto out;
}
}
out:
kfree(orig);
- return CHAP_DIGEST_UNKNOWN;
+ return r;
}
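
Taken together, the reworked parser accepts a value list such as "5,7,6" after "CHAP_A=" and returns the first digest that both maps to a name and can actually be allocated by the crypto API. A hedged stand-alone sketch of the selection loop, assuming strsep()-based tokenizing as in the code above:

    long digest_type;
    char *token, *digest_name;

    while ((token = strsep(&tmp, ",")) != NULL) {
        if (kstrtol(token, 10, &digest_type))
            continue;                /* not a number, try the next token */
        digest_name = chap_get_digest_name(digest_type);
        if (digest_name && chap_test_algorithm(digest_name) == 0)
            return digest_type;      /* first usable digest wins */
    }
    return CHAP_DIGEST_UNKNOWN;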
static void chap_close(struct iscsi_conn *conn)
@@ -94,7 +141,7 @@ static struct iscsi_chap *chap_server_open(
char *aic_str,
unsigned int *aic_len)
{
- int ret;
+ int digest_type;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -109,17 +156,19 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- ret = chap_check_algorithm(a_str);
- switch (ret) {
+ digest_type = chap_check_algorithm(a_str);
+ switch (digest_type) {
case CHAP_DIGEST_MD5:
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
- *aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ chap->digest_size = MD5_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA1:
+ chap->digest_size = SHA1_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA256:
+ chap->digest_size = SHA256_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA3_256:
+ chap->digest_size = SHA3_256_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_UNKNOWN:
default:
@@ -128,6 +177,16 @@ static struct iscsi_chap *chap_server_open(
return NULL;
}
+ chap->digest_name = chap_get_digest_name(digest_type);
+
+ /* Tie the challenge length to the digest size */
+ chap->challenge_len = chap->digest_size;
+
+ pr_debug("[server] Got CHAP_A=%d\n", digest_type);
+ *aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_A=%d\n", digest_type);
+
/*
* Set Identifier.
*/
@@ -146,7 +205,7 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
-static int chap_server_compute_md5(
+static int chap_server_compute_hash(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
@@ -155,36 +214,57 @@ static int chap_server_compute_md5(
{
unsigned long id;
unsigned char id_as_uchar;
- unsigned char digest[MD5_SIGNATURE_SIZE];
- unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
- unsigned char identifier[10], *challenge = NULL;
- unsigned char *challenge_binhex = NULL;
- unsigned char client_digest[MD5_SIGNATURE_SIZE];
- unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char type;
+ unsigned char identifier[10], *initiatorchg = NULL;
+ unsigned char *initiatorchg_binhex = NULL;
+ unsigned char *digest = NULL;
+ unsigned char *response = NULL;
+ unsigned char *client_digest = NULL;
+ unsigned char *server_digest = NULL;
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_shash *tfm = NULL;
struct shash_desc *desc = NULL;
- int auth_ret = -1, ret, challenge_len;
+ int auth_ret = -1, ret, initiatorchg_len;
+
+ digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!digest) {
+ pr_err("Unable to allocate the digest buffer\n");
+ goto out;
+ }
+
+ response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL);
+ if (!response) {
+ pr_err("Unable to allocate the response buffer\n");
+ goto out;
+ }
+
+ client_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!client_digest) {
+ pr_err("Unable to allocate the client_digest buffer\n");
+ goto out;
+ }
+
+ server_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!server_digest) {
+ pr_err("Unable to allocate the server_digest buffer\n");
+ goto out;
+ }
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
- memset(digest, 0, MD5_SIGNATURE_SIZE);
- memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
- memset(client_digest, 0, MD5_SIGNATURE_SIZE);
- memset(server_digest, 0, MD5_SIGNATURE_SIZE);
- challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge) {
+ initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
- challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge_binhex) {
- pr_err("Unable to allocate challenge_binhex buffer\n");
+ initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg_binhex) {
+ pr_err("Unable to allocate initiatorchg_binhex buffer\n");
goto out;
}
/*
@@ -219,18 +299,18 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ if (strlen(chap_r) != chap->digest_size * 2) {
pr_err("Malformed CHAP_R\n");
goto out;
}
- if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
pr_err("Malformed CHAP_R\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- tfm = crypto_alloc_shash("md5", 0, 0);
+ tfm = crypto_alloc_shash(chap->digest_name, 0, 0);
if (IS_ERR(tfm)) {
tfm = NULL;
pr_err("Unable to allocate struct crypto_shash\n");
@@ -265,21 +345,23 @@ static int chap_server_compute_md5(
}
ret = crypto_shash_finup(desc, chap->challenge,
- CHAP_CHALLENGE_LENGTH, server_digest);
+ chap->challenge_len, server_digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
- bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
- pr_debug("[server] MD5 Server Digest: %s\n", response);
+ bin2hex(response, server_digest, chap->digest_size);
+ pr_debug("[server] %s Server Digest: %s\n",
+ chap->digest_name, response);
- if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
- pr_debug("[server] MD5 Digests do not match!\n\n");
+ if (memcmp(server_digest, client_digest, chap->digest_size) != 0) {
+ pr_debug("[server] %s Digests do not match!\n\n",
+ chap->digest_name);
goto out;
} else
- pr_debug("[server] MD5 Digests match, CHAP connection"
- " successful.\n\n");
+ pr_debug("[server] %s Digests match, CHAP connection"
+ " successful.\n\n", chap->digest_name);
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
@@ -317,7 +399,7 @@ static int chap_server_compute_md5(
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
- challenge, &type) < 0) {
+ initiatorchg, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
@@ -326,26 +408,28 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
- if (!challenge_len) {
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
- if (challenge_len > 1024) {
+ if (initiatorchg_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
- if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
pr_err("Malformed CHAP_C\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
* the target.
*/
- if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ if (initiatorchg_len == chap->challenge_len &&
+ !memcmp(initiatorchg_binhex, chap->challenge,
+ initiatorchg_len)) {
pr_err("initiator CHAP_C matches target CHAP_C, failing"
" login attempt\n");
goto out;
@@ -377,7 +461,7 @@ static int chap_server_compute_md5(
/*
* Compute the digest over the initiator's binary challenge.
*/
- ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+ ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len,
digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for ma challenge\n");
@@ -393,7 +477,7 @@ static int chap_server_compute_md5(
/*
* Convert response from binary to ascii hex.
*/
- bin2hex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, chap->digest_size);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
@@ -403,33 +487,15 @@ out:
kzfree(desc);
if (tfm)
crypto_free_shash(tfm);
- kfree(challenge);
- kfree(challenge_binhex);
+ kfree(initiatorchg);
+ kfree(initiatorchg_binhex);
+ kfree(digest);
+ kfree(response);
+ kfree(server_digest);
+ kfree(client_digest);
return auth_ret;
}
-static int chap_got_response(
- struct iscsi_conn *conn,
- struct iscsi_node_auth *auth,
- char *nr_in_ptr,
- char *nr_out_ptr,
- unsigned int *nr_out_len)
-{
- struct iscsi_chap *chap = conn->auth_protocol;
-
- switch (chap->digest_type) {
- case CHAP_DIGEST_MD5:
- if (chap_server_compute_md5(conn, auth, nr_in_ptr,
- nr_out_ptr, nr_out_len) < 0)
- return -1;
- return 0;
- default:
- pr_err("Unknown CHAP digest type %d!\n",
- chap->digest_type);
- return -1;
- }
-}
-
u32 chap_main_loop(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -448,7 +514,7 @@ u32 chap_main_loop(
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
- if (chap_got_response(conn, auth, in_text, out_text,
+ if (chap_server_compute_hash(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d5600ac30b53..fc75c1c20e23 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -6,14 +6,19 @@
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
-#define CHAP_DIGEST_SHA 6
+#define CHAP_DIGEST_SHA1 6
+#define CHAP_DIGEST_SHA256 7
+#define CHAP_DIGEST_SHA3_256 8
-#define CHAP_CHALLENGE_LENGTH 16
+#define MAX_CHAP_CHALLENGE_LEN 32
#define CHAP_CHALLENGE_STR_LEN 4096
-#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_RESPONSE_LENGTH 128 /* sufficient for SHA3-256 */
#define MAX_CHAP_N_SIZE 512
#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+#define SHA1_SIGNATURE_SIZE 20 /* 20 bytes in a SHA1 message digest */
+#define SHA256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA256 message digest */
+#define SHA3_256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA3-256 message digest */
#define CHAP_STAGE_CLIENT_A 1
#define CHAP_STAGE_SERVER_AIC 2
@@ -28,9 +33,11 @@ extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *,
int *, int *);
struct iscsi_chap {
- unsigned char digest_type;
unsigned char id;
- unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned char challenge[MAX_CHAP_CHALLENGE_LEN];
+ unsigned int challenge_len;
+ unsigned char *digest_name;
+ unsigned int digest_size;
unsigned int authenticate_target;
unsigned int chap_state;
} ____cacheline_aligned;
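
The new maxima follow from the largest supported digest: a 32-byte SHA3-256 digest hex-encodes to 64 characters, which fits MAX_RESPONSE_LENGTH (128) with room for framing, and the challenge never exceeds the digest size. Hedged compile-time checks along these lines (placed inside any function) would document the invariants:

    BUILD_BUG_ON(SHA3_256_SIGNATURE_SIZE * 2 >= MAX_RESPONSE_LENGTH);
    BUILD_BUG_ON(SHA3_256_SIGNATURE_SIZE > MAX_CHAP_CHALLENGE_LEN);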
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index daf47f38e081..240c4c4344f6 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -93,9 +93,6 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
-#define X_EXTENSIONKEY "X-com.sbei.version"
-#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
-#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
* Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 3c79411c4cd0..6b4b354c88aa 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -118,7 +118,7 @@ static int srp_get_pr_transport_id(
memset(buf + 8, 0, leading_zero_bytes);
rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
if (rc < 0) {
- pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+ pr_debug("hex2bin failed for %s: %d\n", p, rc);
return rc;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index e5a71addbb06..d24e0a3ba3ff 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,9 +32,6 @@
extern struct se_device *g_lun0_dev;
-static DEFINE_SPINLOCK(tpg_lock);
-static LIST_HEAD(tpg_list);
-
/* __core_tpg_get_initiator_node_acl():
*
* mutex_lock(&tpg->acl_node_mutex); must be held when calling
@@ -475,7 +472,6 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->session_lock);
mutex_init(&se_tpg->tpg_lun_mutex);
@@ -494,10 +490,6 @@ int core_tpg_register(
}
}
- spin_lock_bh(&tpg_lock);
- list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
- spin_unlock_bh(&tpg_lock);
-
pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
@@ -519,10 +511,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&tpg_lock);
- list_del(&se_tpg->se_tpg_node);
- spin_unlock_bh(&tpg_lock);
-
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7f06a62f8661..ea482d4b1f00 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_free_session);
+static int target_release_res(struct se_device *dev, void *data)
+{
+ struct se_session *sess = data;
+
+ if (dev->reservation_holder == sess)
+ target_release_reservation(dev);
+ return 0;
+}
+
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
@@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess)
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+ /*
+ * Since the session is being removed, release SPC-2
+ * reservations held by the session that is disappearing.
+ */
+ target_for_each_device(target_release_res, se_sess);
+
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->fabric_name);
/*
@@ -1243,6 +1258,19 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
return TCM_NO_SENSE;
}
+/**
+ * target_cmd_size_check - Check whether there will be a residual.
+ * @cmd: SCSI command.
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
+ * the SCSI transport driver is available in @cmd->data_length.
+ *
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
+ *
+ * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ *
+ * Return: TCM_NO_SENSE
+ */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 35be1be87d2a..0b9dfa6b17bc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -499,7 +499,7 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
schedule_delayed_work(&tcmu_unmap_work, 0);
/* try to get new page from the mm */
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
goto err_alloc;
@@ -573,7 +573,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
+ tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
if (!tcmu_cmd)
return NULL;
@@ -584,7 +584,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
- GFP_KERNEL);
+ GFP_NOIO);
if (!tcmu_cmd->dbi) {
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
return NULL;
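
GFP_NOIO is used because these allocations sit in the I/O dispatch path: reclaim triggered by a GFP_KERNEL allocation could recurse back into the block layer and deadlock. An alternative (hedged) pattern scopes the restriction instead of tagging every call site:

    unsigned int noio_flags = memalloc_noio_save();

    /* every GFP_KERNEL allocation in this scope behaves as GFP_NOIO */
    tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);

    memalloc_noio_restore(noio_flags);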
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index b9b1e92c6f8d..425c1070de08 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -467,7 +467,6 @@ int target_xcopy_setup_pt(void)
}
memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
- INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 13b0269a0abc..cf2367ba08d6 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -554,6 +554,13 @@ static int check_mem_type(unsigned long start, size_t num_pages)
struct mm_struct *mm = current->mm;
int rc;
+ /*
+ * Allow kernel address to register with OP-TEE as kernel
+ * pages are configured as normal memory only.
+ */
+ if (virt_addr_valid(start))
+ return 0;
+
down_read(&mm->mmap_sem);
rc = __check_mem_type(find_vma(mm, start),
start + num_pages * PAGE_SIZE);
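
virt_addr_valid() is true only for addresses in the kernel's linear map, so the early return skips a VMA walk that is only meaningful for user pointers. An illustrative (hedged) contrast:

    void *kbuf = kmalloc(SZ_4K, GFP_KERNEL);  /* linear map */
    void *vbuf = vmalloc(SZ_4K);              /* vmalloc area */

    virt_addr_valid(kbuf);  /* true: registered without a VMA walk */
    virt_addr_valid(vbuf);  /* false: falls through to __check_mem_type() */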
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 1854a3db7345..b830e0a87fba 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -643,11 +643,6 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
- rc = optee_enumerate_devices();
- if (rc)
- goto err;
-
- pr_info("initialized driver\n");
return optee;
err:
if (optee) {
@@ -702,9 +697,10 @@ static struct optee *optee_svc;
static int __init optee_driver_init(void)
{
- struct device_node *fw_np;
- struct device_node *np;
- struct optee *optee;
+ struct device_node *fw_np = NULL;
+ struct device_node *np = NULL;
+ struct optee *optee = NULL;
+ int rc = 0;
/* Node is supposed to be below /firmware */
fw_np = of_find_node_by_name(NULL, "firmware");
@@ -723,6 +719,14 @@ static int __init optee_driver_init(void)
if (IS_ERR(optee))
return PTR_ERR(optee);
+ rc = optee_enumerate_devices();
+ if (rc) {
+ optee_remove(optee);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+
optee_svc = optee;
return 0;
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
index de1d9b8fad90..0332a5301d61 100644
--- a/drivers/tee/optee/shm_pool.c
+++ b/drivers/tee/optee/shm_pool.c
@@ -17,6 +17,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
{
unsigned int order = get_order(size);
struct page *page;
+ int rc = 0;
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
@@ -26,12 +27,21 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
shm->paddr = page_to_phys(page);
shm->size = PAGE_SIZE << order;
- return 0;
+ if (shm->flags & TEE_SHM_DMA_BUF) {
+ shm->flags |= TEE_SHM_REGISTER;
+ rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+ (unsigned long)shm->kaddr);
+ }
+
+ return rc;
}
static void pool_op_free(struct tee_shm_pool_mgr *poolm,
struct tee_shm *shm)
{
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ optee_shm_unregister(shm->ctx, shm);
+
free_pages((unsigned long)shm->kaddr, get_order(shm->size));
shm->kaddr = NULL;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 001a21abcc28..59b79fc48266 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -144,6 +144,7 @@ config THERMAL_GOV_USER_SPACE
config THERMAL_GOV_POWER_ALLOCATOR
bool "Power allocator thermal governor"
+ depends on ENERGY_MODEL
help
Enable this to manage platform thermals by dynamically
allocating and limiting power to devices.
@@ -348,6 +349,17 @@ config MTK_THERMAL
Enable this option if you want to have support for thermal management
controller present in Mediatek SoCs
+config AMLOGIC_THERMAL
+ tristate "Amlogic Thermal Support"
+ default ARCH_MESON
+ depends on OF && ARCH_MESON
+ help
+ If you say yes here you get support for the Amlogic thermal driver
+ for the G12 SoC family.
+
+ This driver can also be built as a module. If so, the module will
+ be called amlogic_thermal.
+
menu "Intel thermal drivers"
depends on X86 || X86_INTEL_QUARK || COMPILE_TEST
source "drivers/thermal/intel/Kconfig"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 74a37c7f847a..baeb70bf0568 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o
obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
+obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
new file mode 100644
index 000000000000..8a9e9bc421c6
--- /dev/null
+++ b/drivers/thermal/amlogic_thermal.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Thermal Sensor Driver
+ *
+ * Copyright (C) 2017 Huan Biao <huan.biao@amlogic.com>
+ * Copyright (C) 2019 Guillaume La Roque <glaroque@baylibre.com>
+ *
+ * Register value to Celsius temperature formulas:
+ *
+ *   U = Read_Val / 2^16,   Uptat = (m * U) / (1 + n * U)
+ *
+ *   Temperature = A * (Uptat + u_efuse / 2^16) - B
+ *
+ * A, B, m, n : calibration parameters
+ * u_efuse : fused calibration value, a signed 16-bit value
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define TSENSOR_CFG_REG1 0x4
+ #define TSENSOR_CFG_REG1_RSET_VBG BIT(12)
+ #define TSENSOR_CFG_REG1_RSET_ADC BIT(11)
+ #define TSENSOR_CFG_REG1_VCM_EN BIT(10)
+ #define TSENSOR_CFG_REG1_VBG_EN BIT(9)
+ #define TSENSOR_CFG_REG1_OUT_CTL BIT(6)
+ #define TSENSOR_CFG_REG1_FILTER_EN BIT(5)
+ #define TSENSOR_CFG_REG1_DEM_EN BIT(3)
+ #define TSENSOR_CFG_REG1_CH_SEL GENMASK(1, 0)
+ #define TSENSOR_CFG_REG1_ENABLE \
+ (TSENSOR_CFG_REG1_FILTER_EN | \
+ TSENSOR_CFG_REG1_VCM_EN | \
+ TSENSOR_CFG_REG1_VBG_EN | \
+ TSENSOR_CFG_REG1_DEM_EN | \
+ TSENSOR_CFG_REG1_CH_SEL)
+
+#define TSENSOR_STAT0 0x40
+
+#define TSENSOR_STAT9 0x64
+
+#define TSENSOR_READ_TEMP_MASK GENMASK(15, 0)
+#define TSENSOR_TEMP_MASK GENMASK(11, 0)
+
+#define TSENSOR_TRIM_SIGN_MASK BIT(15)
+#define TSENSOR_TRIM_TEMP_MASK GENMASK(14, 0)
+#define TSENSOR_TRIM_VERSION_MASK GENMASK(31, 24)
+
+#define TSENSOR_TRIM_VERSION(_version) \
+ FIELD_GET(TSENSOR_TRIM_VERSION_MASK, _version)
+
+#define TSENSOR_TRIM_CALIB_VALID_MASK (GENMASK(3, 2) | BIT(7))
+
+#define TSENSOR_CALIB_OFFSET 1
+#define TSENSOR_CALIB_SHIFT 4
+
+/**
+ * struct amlogic_thermal_soc_calib_data
+ * @A: calibration parameter
+ * @B: calibration parameter
+ * @m: calibration parameter
+ * @n: calibration parameter
+ * This structure holds the calibration parameters required by the
+ * Amlogic thermal driver.
+ */
+struct amlogic_thermal_soc_calib_data {
+ int A;
+ int B;
+ int m;
+ int n;
+};
+
+/**
+ * struct amlogic_thermal_data
+ * @u_efuse_off: register offset to read fused calibration value
+ * @calibration_parameters: calibration parameters structure pointer
+ * @regmap_config: regmap config for the device
+ * This structure is required to configure the Amlogic thermal driver.
+ */
+struct amlogic_thermal_data {
+ int u_efuse_off;
+ const struct amlogic_thermal_soc_calib_data *calibration_parameters;
+ const struct regmap_config *regmap_config;
+};
+
+struct amlogic_thermal {
+ struct platform_device *pdev;
+ const struct amlogic_thermal_data *data;
+ struct regmap *regmap;
+ struct regmap *sec_ao_map;
+ struct clk *clk;
+ struct thermal_zone_device *tzd;
+ u32 trim_info;
+};
+
+/*
+ * Calculate a temperature value from a temperature code.
+ * The unit of the temperature is degree milliCelsius.
+ */
+static int amlogic_thermal_code_to_millicelsius(struct amlogic_thermal *pdata,
+ int temp_code)
+{
+ const struct amlogic_thermal_soc_calib_data *param =
+ pdata->data->calibration_parameters;
+ int temp;
+ s64 factor, Uptat, uefuse;
+
+ uefuse = pdata->trim_info & TSENSOR_TRIM_SIGN_MASK ?
+ ~(pdata->trim_info & TSENSOR_TRIM_TEMP_MASK) + 1 :
+ (pdata->trim_info & TSENSOR_TRIM_TEMP_MASK);
+
+ factor = param->n * temp_code;
+ factor = div_s64(factor, 100);
+
+ Uptat = temp_code * param->m;
+ Uptat = div_s64(Uptat, 100);
+ Uptat = Uptat * BIT(16);
+ Uptat = div_s64(Uptat, BIT(16) + factor);
+
+ temp = (Uptat + uefuse) * param->A;
+ temp = div_s64(temp, BIT(16));
+ temp = (temp - param->B) * 100;
+
+ return temp;
+}
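+
+/*
+ * Worked example (illustrative numbers only): with the G12A parameters
+ * A = 9411, B = 3159, m = 424, n = 324, a raw code of 0x2000 (8192)
+ * and a zero trim value:
+ *
+ *   factor = 324 * 8192 / 100                             = 26542
+ *   Uptat  = ((8192 * 424 / 100) << 16) / (65536 + 26542) = 24721
+ *   temp   = (24721 * 9411 / 65536 - 3159) * 100          = 39000 m°C
+ *
+ * i.e. roughly 39 degrees Celsius.
+ */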
+
+static int amlogic_thermal_initialize(struct amlogic_thermal *pdata)
+{
+ int ret = 0;
+ int ver;
+
+ regmap_read(pdata->sec_ao_map, pdata->data->u_efuse_off,
+ &pdata->trim_info);
+
+ ver = TSENSOR_TRIM_VERSION(pdata->trim_info);
+
+ if ((ver & TSENSOR_TRIM_CALIB_VALID_MASK) == 0) {
+ ret = -EINVAL;
+ dev_err(&pdata->pdev->dev,
+ "tsensor thermal calibration not supported: 0x%x!\n",
+ ver);
+ }
+
+ return ret;
+}
+
+static int amlogic_thermal_enable(struct amlogic_thermal *data)
+{
+ int ret;
+
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(data->regmap, TSENSOR_CFG_REG1,
+ TSENSOR_CFG_REG1_ENABLE, TSENSOR_CFG_REG1_ENABLE);
+
+ return 0;
+}
+
+static int amlogic_thermal_disable(struct amlogic_thermal *data)
+{
+ regmap_update_bits(data->regmap, TSENSOR_CFG_REG1,
+ TSENSOR_CFG_REG1_ENABLE, 0);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int amlogic_thermal_get_temp(void *data, int *temp)
+{
+ unsigned int tval;
+ struct amlogic_thermal *pdata = data;
+
+ if (!data)
+ return -EINVAL;
+
+ regmap_read(pdata->regmap, TSENSOR_STAT0, &tval);
+ *temp =
+ amlogic_thermal_code_to_millicelsius(pdata,
+ tval & TSENSOR_READ_TEMP_MASK);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops amlogic_thermal_ops = {
+ .get_temp = amlogic_thermal_get_temp,
+};
+
+static const struct regmap_config amlogic_thermal_regmap_config_g12a = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = TSENSOR_STAT9,
+};
+
+static const struct amlogic_thermal_soc_calib_data amlogic_thermal_g12a = {
+ .A = 9411,
+ .B = 3159,
+ .m = 424,
+ .n = 324,
+};
+
+static const struct amlogic_thermal_data amlogic_thermal_g12a_cpu_param = {
+ .u_efuse_off = 0x128,
+ .calibration_parameters = &amlogic_thermal_g12a,
+ .regmap_config = &amlogic_thermal_regmap_config_g12a,
+};
+
+static const struct amlogic_thermal_data amlogic_thermal_g12a_ddr_param = {
+ .u_efuse_off = 0xf0,
+ .calibration_parameters = &amlogic_thermal_g12a,
+ .regmap_config = &amlogic_thermal_regmap_config_g12a,
+};
+
+static const struct of_device_id of_amlogic_thermal_match[] = {
+ {
+ .compatible = "amlogic,g12a-ddr-thermal",
+ .data = &amlogic_thermal_g12a_ddr_param,
+ },
+ {
+ .compatible = "amlogic,g12a-cpu-thermal",
+ .data = &amlogic_thermal_g12a_cpu_param,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_amlogic_thermal_match);
+
+static int amlogic_thermal_probe(struct platform_device *pdev)
+{
+ struct amlogic_thermal *pdata;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->data = of_device_get_match_data(dev);
+ pdata->pdev = pdev;
+ platform_set_drvdata(pdev, pdata);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(base);
+ }
+
+ pdata->regmap = devm_regmap_init_mmio(dev, base,
+ pdata->data->regmap_config);
+ if (IS_ERR(pdata->regmap))
+ return PTR_ERR(pdata->regmap);
+
+ pdata->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ if (PTR_ERR(pdata->clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(pdata->clk);
+ }
+
+ pdata->sec_ao_map = syscon_regmap_lookup_by_phandle
+ (pdev->dev.of_node, "amlogic,ao-secure");
+ if (IS_ERR(pdata->sec_ao_map)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(pdata->sec_ao_map);
+ }
+
+ pdata->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ 0,
+ pdata,
+ &amlogic_thermal_ops);
+ if (IS_ERR(pdata->tzd)) {
+ ret = PTR_ERR(pdata->tzd);
+ dev_err(dev, "Failed to register tsensor: %d\n", ret);
+ return ret;
+ }
+
+ ret = amlogic_thermal_initialize(pdata);
+ if (ret)
+ return ret;
+
+ ret = amlogic_thermal_enable(pdata);
+
+ return ret;
+}
+
+static int amlogic_thermal_remove(struct platform_device *pdev)
+{
+ struct amlogic_thermal *data = platform_get_drvdata(pdev);
+
+ return amlogic_thermal_disable(data);
+}
+
+static int __maybe_unused amlogic_thermal_suspend(struct device *dev)
+{
+ struct amlogic_thermal *data = dev_get_drvdata(dev);
+
+ return amlogic_thermal_disable(data);
+}
+
+static int __maybe_unused amlogic_thermal_resume(struct device *dev)
+{
+ struct amlogic_thermal *data = dev_get_drvdata(dev);
+
+ return amlogic_thermal_enable(data);
+}
+
+static SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops,
+ amlogic_thermal_suspend, amlogic_thermal_resume);
+
+static struct platform_driver amlogic_thermal_driver = {
+ .driver = {
+ .name = "amlogic_thermal",
+ .pm = &amlogic_thermal_pm_ops,
+ .of_match_table = of_amlogic_thermal_match,
+ },
+ .probe = amlogic_thermal_probe,
+ .remove = amlogic_thermal_remove,
+};
+
+module_platform_driver(amlogic_thermal_driver);
+
+MODULE_AUTHOR("Guillaume La Roque <glaroque@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6b9865c786ba..52569b27b426 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
#include <trace/events/thermal.h>
@@ -38,19 +39,6 @@
*/
/**
- * struct freq_table - frequency table along with power entries
- * @frequency: frequency in KHz
- * @power: power in mW
- *
- * This structure is built when the cooling device registers and helps
- * in translating frequency to power and vice versa.
- */
-struct freq_table {
- u32 frequency;
- u32 power;
-};
-
-/**
* struct time_in_idle - Idle time stats
* @time: previous reading of the absolute time that this cpu was idle
* @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
@@ -69,7 +57,7 @@ struct time_in_idle {
* cooling devices.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
- * @freq_table: Freq table in descending order of frequencies
+ * @em: Reference on the Energy Model of the device
* @cdev: thermal_cooling_device pointer to keep track of the
* registered cooling device.
* @policy: cpufreq policy.
@@ -84,7 +72,7 @@ struct cpufreq_cooling_device {
u32 last_load;
unsigned int cpufreq_state;
unsigned int max_level;
- struct freq_table *freq_table; /* In descending order */
+ struct em_perf_domain *em;
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
@@ -95,8 +83,7 @@ static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);
-/* Below code defines functions to be used for cpufreq as cooling device */
-
+#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
* get_level: Find the level for a particular frequency
* @cpufreq_cdev: cpufreq_cdev for which the property is required
@@ -107,114 +94,40 @@ static LIST_HEAD(cpufreq_cdev_list);
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned int freq)
{
- struct freq_table *freq_table = cpufreq_cdev->freq_table;
- unsigned long level;
+ int i;
- for (level = 1; level <= cpufreq_cdev->max_level; level++)
- if (freq > freq_table[level].frequency)
+ for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+ if (freq > cpufreq_cdev->em->table[i].frequency)
break;
-
- return level - 1;
-}
-
-/**
- * update_freq_table() - Update the freq table with power numbers
- * @cpufreq_cdev: the cpufreq cooling device in which to update the table
- * @capacitance: dynamic power coefficient for these cpus
- *
- * Update the freq table with power numbers. This table will be used in
- * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
- * frequency efficiently. Power is stored in mW, frequency in KHz. The
- * resulting table is in descending order.
- *
- * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
- * or -ENOMEM if we run out of memory.
- */
-static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
- u32 capacitance)
-{
- struct freq_table *freq_table = cpufreq_cdev->freq_table;
- struct dev_pm_opp *opp;
- struct device *dev = NULL;
- int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
-
- dev = get_cpu_device(cpu);
- if (unlikely(!dev)) {
- pr_warn("No cpu device for cpu %d\n", cpu);
- return -ENODEV;
}
- num_opps = dev_pm_opp_get_opp_count(dev);
- if (num_opps < 0)
- return num_opps;
-
- /*
- * The cpufreq table is also built from the OPP table and so the count
- * should match.
- */
- if (num_opps != cpufreq_cdev->max_level + 1) {
- dev_warn(dev, "Number of OPPs not matching with max_levels\n");
- return -EINVAL;
- }
-
- for (i = 0; i <= cpufreq_cdev->max_level; i++) {
- unsigned long freq = freq_table[i].frequency * 1000;
- u32 freq_mhz = freq_table[i].frequency / 1000;
- u64 power;
- u32 voltage_mv;
-
- /*
- * Find ceil frequency as 'freq' may be slightly lower than OPP
- * freq due to truncation while converting to kHz.
- */
- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- if (IS_ERR(opp)) {
- dev_err(dev, "failed to get opp for %lu frequency\n",
- freq);
- return -EINVAL;
- }
-
- voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
- dev_pm_opp_put(opp);
-
- /*
- * Do the multiplication with MHz and millivolt so as
- * to not overflow.
- */
- power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
- do_div(power, 1000000000);
-
- /* power is stored in mW */
- freq_table[i].power = power;
- }
-
- return 0;
+ return cpufreq_cdev->max_level - i - 1;
}
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
u32 freq)
{
int i;
- struct freq_table *freq_table = cpufreq_cdev->freq_table;
- for (i = 1; i <= cpufreq_cdev->max_level; i++)
- if (freq > freq_table[i].frequency)
+ for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+ if (freq > cpufreq_cdev->em->table[i].frequency)
break;
+ }
- return freq_table[i - 1].power;
+ return cpufreq_cdev->em->table[i + 1].power;
}
static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
u32 power)
{
int i;
- struct freq_table *freq_table = cpufreq_cdev->freq_table;
- for (i = 1; i <= cpufreq_cdev->max_level; i++)
- if (power > freq_table[i].power)
+ for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+ if (power > cpufreq_cdev->em->table[i].power)
break;
+ }
- return freq_table[i - 1].frequency;
+ return cpufreq_cdev->em->table[i + 1].frequency;
}
/**
@@ -265,76 +178,6 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}
-/* cpufreq cooling device callback functions are defined below */
-
-/**
- * cpufreq_get_max_state - callback function to get the max cooling state.
- * @cdev: thermal cooling device pointer.
- * @state: fill this variable with the max cooling state.
- *
- * Callback for the thermal cooling device to return the cpufreq
- * max cooling state.
- *
- * Return: 0 on success, an error code otherwise.
- */
-static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
-
- *state = cpufreq_cdev->max_level;
- return 0;
-}
-
-/**
- * cpufreq_get_cur_state - callback function to get the current cooling state.
- * @cdev: thermal cooling device pointer.
- * @state: fill this variable with the current cooling state.
- *
- * Callback for the thermal cooling device to return the cpufreq
- * current cooling state.
- *
- * Return: 0 on success, an error code otherwise.
- */
-static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
-
- *state = cpufreq_cdev->cpufreq_state;
-
- return 0;
-}
-
-/**
- * cpufreq_set_cur_state - callback function to set the current cooling state.
- * @cdev: thermal cooling device pointer.
- * @state: set this variable to the current cooling state.
- *
- * Callback for the thermal cooling device to change the cpufreq
- * current cooling state.
- *
- * Return: 0 on success, an error code otherwise.
- */
-static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state)
-{
- struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
-
- /* Request state should be less than max_level */
- if (WARN_ON(state > cpufreq_cdev->max_level))
- return -EINVAL;
-
- /* Check if the old cooling action is same as new cooling action */
- if (cpufreq_cdev->cpufreq_state == state)
- return 0;
-
- cpufreq_cdev->cpufreq_state = state;
-
- return freq_qos_update_request(&cpufreq_cdev->qos_req,
- cpufreq_cdev->freq_table[state].frequency);
-}
-
/**
* cpufreq_get_requested_power() - get the current power
* @cdev: &thermal_cooling_device pointer
@@ -425,7 +268,7 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz,
unsigned long state, u32 *power)
{
- unsigned int freq, num_cpus;
+ unsigned int freq, num_cpus, idx;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
@@ -434,7 +277,8 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
- freq = cpufreq_cdev->freq_table[state].frequency;
+ idx = cpufreq_cdev->max_level - state;
+ freq = cpufreq_cdev->em->table[idx].frequency;
*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
return 0;
@@ -479,43 +323,142 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
return 0;
}
-/* Bind cpufreq callbacks to thermal cooling device ops */
+static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
+ struct em_perf_domain *em)
+{
+ struct cpufreq_policy *policy;
+ unsigned int nr_levels;
-static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
- .get_max_state = cpufreq_get_max_state,
- .get_cur_state = cpufreq_get_cur_state,
- .set_cur_state = cpufreq_set_cur_state,
-};
+ if (!em)
+ return false;
-static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
- .get_max_state = cpufreq_get_max_state,
- .get_cur_state = cpufreq_get_cur_state,
- .set_cur_state = cpufreq_set_cur_state,
- .get_requested_power = cpufreq_get_requested_power,
- .state2power = cpufreq_state2power,
- .power2state = cpufreq_power2state,
-};
+ policy = cpufreq_cdev->policy;
+ if (!cpumask_equal(policy->related_cpus, to_cpumask(em->cpus))) {
+ pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
+ cpumask_pr_args(to_cpumask(em->cpus)),
+ cpumask_pr_args(policy->related_cpus));
+ return false;
+ }
-static unsigned int find_next_max(struct cpufreq_frequency_table *table,
- unsigned int prev_max)
+ nr_levels = cpufreq_cdev->max_level + 1;
+ if (em->nr_cap_states != nr_levels) {
+ pr_err("The number of cap states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
+ cpumask_pr_args(to_cpumask(em->cpus)),
+ em->nr_cap_states, nr_levels);
+ return false;
+ }
+
+ return true;
+}
+#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
+
+static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
+ unsigned long state)
{
- struct cpufreq_frequency_table *pos;
- unsigned int max = 0;
+ struct cpufreq_policy *policy;
+ unsigned long idx;
- cpufreq_for_each_valid_entry(pos, table) {
- if (pos->frequency > max && pos->frequency < prev_max)
- max = pos->frequency;
+#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
+ /* Use the Energy Model table if available */
+ if (cpufreq_cdev->em) {
+ idx = cpufreq_cdev->max_level - state;
+ return cpufreq_cdev->em->table[idx].frequency;
}
+#endif
+
+ /* Otherwise, fallback on the CPUFreq table */
+ policy = cpufreq_cdev->policy;
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ idx = cpufreq_cdev->max_level - state;
+ else
+ idx = state;
+
+ return policy->freq_table[idx].frequency;
+}
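+
+/*
+ * Illustrative mapping (assumed example): with max_level == 2 and an
+ * ascending table {500 MHz, 1.0 GHz, 1.5 GHz}, cooling state 0 selects
+ * index 2 (1.5 GHz, no throttling) and state 2 selects index 0
+ * (500 MHz, deepest throttling); a descending cpufreq table is indexed
+ * by the state directly. The EM table is always ascending, so it uses
+ * the same reversed indexing.
+ */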
+
+/* cpufreq cooling device callback functions are defined below */
+
+/**
+ * cpufreq_get_max_state - callback function to get the max cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+
+ *state = cpufreq_cdev->max_level;
+ return 0;
+}
+
+/**
+ * cpufreq_get_cur_state - callback function to get the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
- return max;
+ *state = cpufreq_cdev->cpufreq_state;
+
+ return 0;
}
/**
+ * cpufreq_set_cur_state - callback function to set the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: the requested cooling state to set.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+
+ /* Request state should be less than max_level */
+ if (WARN_ON(state > cpufreq_cdev->max_level))
+ return -EINVAL;
+
+ /* Check if the old cooling action is same as new cooling action */
+ if (cpufreq_cdev->cpufreq_state == state)
+ return 0;
+
+ cpufreq_cdev->cpufreq_state = state;
+
+ return freq_qos_update_request(&cpufreq_cdev->qos_req,
+ get_state_freq(cpufreq_cdev, state));
+}
+
+/* Bind cpufreq callbacks to thermal cooling device ops */
+
+static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
+ .get_max_state = cpufreq_get_max_state,
+ .get_cur_state = cpufreq_get_cur_state,
+ .set_cur_state = cpufreq_set_cur_state,
+};
+
+/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
* @policy: cpufreq policy
* Normally this should be the same as cpufreq policy->related_cpus.
- * @capacitance: dynamic power coefficient for these cpus
+ * @em: Energy Model of the cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -527,12 +470,13 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy, u32 capacitance)
+ struct cpufreq_policy *policy,
+ struct em_perf_domain *em)
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
char dev_name[THERMAL_NAME_LENGTH];
- unsigned int freq, i, num_cpus;
+ unsigned int i, num_cpus;
struct device *dev;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
@@ -573,51 +517,36 @@ __cpufreq_cooling_register(struct device_node *np,
/* max_level is an index, not a counter */
cpufreq_cdev->max_level = i - 1;
- cpufreq_cdev->freq_table = kmalloc_array(i,
- sizeof(*cpufreq_cdev->freq_table),
- GFP_KERNEL);
- if (!cpufreq_cdev->freq_table) {
- cdev = ERR_PTR(-ENOMEM);
- goto free_idle_time;
- }
-
ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
if (ret < 0) {
cdev = ERR_PTR(ret);
- goto free_table;
+ goto free_idle_time;
}
cpufreq_cdev->id = ret;
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
cpufreq_cdev->id);
- /* Fill freq-table in descending order of frequencies */
- for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
- freq = find_next_max(policy->freq_table, freq);
- cpufreq_cdev->freq_table[i].frequency = freq;
-
- /* Warn for duplicate entries */
- if (!freq)
- pr_warn("%s: table has duplicate entries\n", __func__);
- else
- pr_debug("%s: freq:%u KHz\n", __func__, freq);
- }
-
- if (capacitance) {
- ret = update_freq_table(cpufreq_cdev, capacitance);
- if (ret) {
- cdev = ERR_PTR(ret);
- goto remove_ida;
- }
-
- cooling_ops = &cpufreq_power_cooling_ops;
- } else {
- cooling_ops = &cpufreq_cooling_ops;
+ cooling_ops = &cpufreq_cooling_ops;
+
+#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
+ if (em_is_sane(cpufreq_cdev, em)) {
+ cpufreq_cdev->em = em;
+ cooling_ops->get_requested_power = cpufreq_get_requested_power;
+ cooling_ops->state2power = cpufreq_state2power;
+ cooling_ops->power2state = cpufreq_power2state;
+ } else
+#endif
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
+ pr_err("%s: unsorted frequency tables are not supported\n",
+ __func__);
+ cdev = ERR_PTR(-EINVAL);
+ goto remove_ida;
}
ret = freq_qos_add_request(&policy->constraints,
&cpufreq_cdev->qos_req, FREQ_QOS_MAX,
- cpufreq_cdev->freq_table[0].frequency);
+ get_state_freq(cpufreq_cdev, 0));
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
@@ -640,8 +569,6 @@ remove_qos_req:
freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
-free_table:
- kfree(cpufreq_cdev->freq_table);
free_idle_time:
kfree(cpufreq_cdev->idle_time);
free_cdev:
@@ -663,7 +590,7 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, policy, 0);
+ return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
@@ -691,7 +618,6 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
struct thermal_cooling_device *cdev = NULL;
- u32 capacitance = 0;
if (!np) {
pr_err("cpu_cooling: OF node not available for cpu%d\n",
@@ -700,10 +626,9 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
}
if (of_find_property(np, "#cooling-cells", NULL)) {
- of_property_read_u32(np, "dynamic-power-coefficient",
- &capacitance);
+ struct em_perf_domain *em = em_cpu_get(policy->cpu);
- cdev = __cpufreq_cooling_register(np, policy, capacitance);
+ cdev = __cpufreq_cooling_register(np, policy, em);
if (IS_ERR(cdev)) {
pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
policy->cpu, PTR_ERR(cdev));
@@ -739,7 +664,6 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
freq_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
- kfree(cpufreq_cdev->freq_table);
kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index 5716b62e0f73..f75271b669c6 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -103,6 +104,7 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
int status;
u32 temp_out;
u32 out;
+ unsigned long update_ptps;
u32 store_ptps;
u32 store_ptmc;
u32 store_te_out;
@@ -120,8 +122,10 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
if (status)
return status;
- out = (store_ptps & ~(0xFF << (thres_index * 8)));
- out |= (temp_out & 0xFF) << (thres_index * 8);
+ update_ptps = store_ptps;
+ bitmap_set_value8(&update_ptps, temp_out & 0xFF, thres_index * 8);
+ out = update_ptps;
+
status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
SOC_DTS_OFFSET_PTPS, out);
if (status)
@@ -223,6 +227,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd,
u32 out;
struct intel_soc_dts_sensor_entry *dts;
struct intel_soc_dts_sensors *sensors;
+ unsigned long raw;
dts = tzd->devdata;
sensors = dts->sensors;
@@ -231,8 +236,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd,
if (status)
return status;
- out = (out & dts->temp_mask) >> dts->temp_shift;
- out -= SOC_DTS_TJMAX_ENCODING;
+ raw = out;
+ out = bitmap_get_value8(&raw, dts->id * 8) - SOC_DTS_TJMAX_ENCODING;
*temp = sensors->tj_max - out * 1000;
return 0;
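
bitmap_get_value8() reads the 8-bit field at a byte-aligned bit offset out of a bitmap word, replacing the open-coded mask-and-shift. For example (hedged):

    unsigned long word = 0x00451f2a;

    bitmap_get_value8(&word, 0);  /* 0x2a: sensor 0's byte */
    bitmap_get_value8(&word, 8);  /* 0x1f: sensor 1's byte */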
@@ -280,11 +285,14 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
int read_only_trip_cnt)
{
char name[10];
+ unsigned long trip;
int trip_count = 0;
int trip_mask = 0;
+ int writable_trip_cnt = 0;
+ unsigned long ptps;
u32 store_ptps;
+ unsigned long i;
int ret;
- int i;
/* Store status to restore on exit */
ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
@@ -293,11 +301,10 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
goto err_ret;
dts->id = id;
- dts->temp_mask = 0x00FF << (id * 8);
- dts->temp_shift = id * 8;
if (notification_support) {
trip_count = min(SOC_MAX_DTS_TRIPS, trip_cnt);
- trip_mask = BIT(trip_count - read_only_trip_cnt) - 1;
+ writable_trip_cnt = trip_count - read_only_trip_cnt;
+ trip_mask = GENMASK(writable_trip_cnt - 1, 0);
}
/* Check if the writable trip we provide is not used by BIOS */
@@ -306,11 +313,9 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
if (ret)
trip_mask = 0;
else {
- for (i = 0; i < trip_count; ++i) {
- if (trip_mask & BIT(i))
- if (store_ptps & (0xff << (i * 8)))
- trip_mask &= ~BIT(i);
- }
+ ptps = store_ptps;
+ for_each_set_clump8(i, trip, &ptps, writable_trip_cnt * 8)
+ trip_mask &= ~BIT(i / 8);
}
dts->trip_mask = trip_mask;
dts->trip_count = trip_count;
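A hedged sketch of the for_each_set_clump8() iteration used above (the
wrapper name is illustrative): the iterator visits every 8-bit clump with at
least one set bit, 'start' being the clump's bit offset, so start / 8
recovers the index of a trip whose PTPS byte the BIOS already programmed.

#include <linux/bitops.h>

static u32 example_trips_used_by_bios(u32 ptps_reg, int writable_trip_cnt)
{
	unsigned long ptps = ptps_reg;
	unsigned long clump, start;
	u32 used = 0;

	for_each_set_clump8(start, clump, &ptps, writable_trip_cnt * 8)
		used |= BIT(start / 8);	/* trip index of this byte */

	return used;
}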
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.h b/drivers/thermal/intel/intel_soc_dts_iosf.h
index adfb09af33fc..c54945748200 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.h
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.h
@@ -24,8 +24,6 @@ struct intel_soc_dts_sensors;
struct intel_soc_dts_sensor_entry {
int id;
- u32 temp_mask;
- u32 temp_shift;
u32 store_status;
u32 trip_mask;
u32 trip_count;
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index e46a4e3f25c4..fb77acb8d13b 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -245,11 +245,11 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
return adc_code * slope + offset;
}
-static int get_temp_8960(struct tsens_priv *priv, int id, int *temp)
+static int get_temp_8960(struct tsens_sensor *s, int *temp)
{
int ret;
u32 code, trdy;
- const struct tsens_sensor *s = &priv->sensor[id];
+ struct tsens_priv *priv = s->priv;
unsigned long timeout;
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index 528df8801254..c8d57ee0a5bb 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/nvmem-consumer.h>
@@ -12,6 +13,31 @@
#include <linux/regmap.h>
#include "tsens.h"
+/**
+ * struct tsens_irq_data - IRQ status and temperature violations
+ * @up_viol: upper threshold violated
+ * @up_thresh: upper threshold temperature value
+ * @up_irq_mask: mask register for upper threshold irqs
+ * @up_irq_clear: clear register for upper threshold irqs
+ * @low_viol: lower threshold violated
+ * @low_thresh: lower threshold temperature value
+ * @low_irq_mask: mask register for lower threshold irqs
+ * @low_irq_clear: clear register for lower threshold irqs
+ *
+ * Structure containing data about temperature threshold settings and
+ * irq status showing whether they were violated.
+ */
+struct tsens_irq_data {
+ u32 up_viol;
+ int up_thresh;
+ u32 up_irq_mask;
+ u32 up_irq_clear;
+ u32 low_viol;
+ int low_thresh;
+ u32 low_irq_mask;
+ u32 low_irq_clear;
+};
+
char *qfprom_read(struct device *dev, const char *cname)
{
struct nvmem_cell *cell;
@@ -42,8 +68,8 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
for (i = 0; i < priv->num_sensors; i++) {
dev_dbg(priv->dev,
- "sensor%d - data_point1:%#x data_point2:%#x\n",
- i, p1[i], p2[i]);
+ "%s: sensor%d - data_point1:%#x data_point2:%#x\n",
+ __func__, i, p1[i], p2[i]);
priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
@@ -60,10 +86,18 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
(CAL_DEGC_PT1 *
priv->sensor[i].slope);
- dev_dbg(priv->dev, "offset:%d\n", priv->sensor[i].offset);
+ dev_dbg(priv->dev, "%s: offset:%d\n", __func__, priv->sensor[i].offset);
}
}
+static inline u32 degc_to_code(int degc, const struct tsens_sensor *s)
+{
+ u64 code = div_u64(((u64)degc * s->slope + s->offset), SLOPE_FACTOR);
+
+ pr_debug("%s: raw_code: 0x%llx, degc:%d\n", __func__, code, degc);
+ return clamp_val(code, THRESHOLD_MIN_ADC_CODE, THRESHOLD_MAX_ADC_CODE);
+}
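+/*
+ * Hedged numeric check with assumed values: for slope = SLOPE_DEFAULT
+ * (3200), offset = 100610 and degc = 120, code = (120 * 3200 + 100610)
+ * / 1000 = 484, which lies inside [THRESHOLD_MIN_ADC_CODE,
+ * THRESHOLD_MAX_ADC_CODE] = [0x0, 0x3ff].
+ */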
+
static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
{
int degc, num, den;
@@ -83,12 +117,353 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
return degc;
}
-int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp)
+/**
+ * tsens_hw_to_mC - Return sign-extended temperature in mCelsius.
+ * @s: Pointer to sensor struct
+ * @field: Index into regmap_field array pointing to temperature data
+ *
+ * This function handles temperature returned in ADC code or deciCelsius
+ * depending on IP version.
+ *
+ * Return: Temperature in milliCelsius on success, a negative errno will
+ * be returned in error cases
+ */
+static int tsens_hw_to_mC(struct tsens_sensor *s, int field)
+{
+ struct tsens_priv *priv = s->priv;
+ u32 resolution;
+ u32 temp = 0;
+ int ret;
+
+ resolution = priv->fields[LAST_TEMP_0].msb -
+ priv->fields[LAST_TEMP_0].lsb;
+
+ ret = regmap_field_read(priv->rf[field], &temp);
+ if (ret)
+ return ret;
+
+ /* Convert temperature from ADC code to milliCelsius */
+ if (priv->feat->adc)
+ return code_to_degc(temp, s) * 1000;
+
+ /* deciCelsius -> milliCelsius along with sign extension */
+ return sign_extend32(temp, resolution) * 100;
+}
+
+/**
+ * tsens_mC_to_hw - Convert temperature to hardware register value
+ * @s: Pointer to sensor struct
+ * @temp: temperature in milliCelsius to be programmed to hardware
+ *
+ * This function outputs the value to be written to hardware in ADC code
+ * or deciCelsius depending on IP version.
+ *
+ * Return: ADC code or temperature in deciCelsius.
+ */
+static int tsens_mC_to_hw(struct tsens_sensor *s, int temp)
+{
+ struct tsens_priv *priv = s->priv;
+
+ /* milliC to adc code */
+ if (priv->feat->adc)
+ return degc_to_code(temp / 1000, s);
+
+ /* milliC to deciC */
+ return temp / 100;
+}
+
+static inline enum tsens_ver tsens_version(struct tsens_priv *priv)
+{
+ return priv->feat->ver_major;
+}
+
+static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id,
+ enum tsens_irq_type irq_type, bool enable)
+{
+ u32 index = 0;
+
+ switch (irq_type) {
+ case UPPER:
+ index = UP_INT_CLEAR_0 + hw_id;
+ break;
+ case LOWER:
+ index = LOW_INT_CLEAR_0 + hw_id;
+ break;
+ }
+ regmap_field_write(priv->rf[index], enable ? 0 : 1);
+}
+
+static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id,
+ enum tsens_irq_type irq_type, bool enable)
+{
+ u32 index_mask = 0, index_clear = 0;
+
+ /*
+ * To enable the interrupt flag for a sensor:
+ * - clear the mask bit
+ * To disable the interrupt flag for a sensor:
+ * - Mask further interrupts for this sensor
+ * - Write 1 followed by 0 to clear the interrupt
+ */
+ switch (irq_type) {
+ case UPPER:
+ index_mask = UP_INT_MASK_0 + hw_id;
+ index_clear = UP_INT_CLEAR_0 + hw_id;
+ break;
+ case LOWER:
+ index_mask = LOW_INT_MASK_0 + hw_id;
+ index_clear = LOW_INT_CLEAR_0 + hw_id;
+ break;
+ }
+
+ if (enable) {
+ regmap_field_write(priv->rf[index_mask], 0);
+ } else {
+ regmap_field_write(priv->rf[index_mask], 1);
+ regmap_field_write(priv->rf[index_clear], 1);
+ regmap_field_write(priv->rf[index_clear], 0);
+ }
+}
+
+/**
+ * tsens_set_interrupt - Set state of an interrupt
+ * @priv: Pointer to tsens controller private data
+ * @hw_id: Hardware ID aka. sensor number
+ * @irq_type: irq_type from enum tsens_irq_type
+ * @enable: false = disable, true = enable
+ *
+ * Call IP-specific function to set state of an interrupt
+ *
+ * Return: void
+ */
+static void tsens_set_interrupt(struct tsens_priv *priv, u32 hw_id,
+ enum tsens_irq_type irq_type, bool enable)
+{
+ dev_dbg(priv->dev, "[%u] %s: %s -> %s\n", hw_id, __func__,
+ irq_type ? ((irq_type == 1) ? "UP" : "CRITICAL") : "LOW",
+ enable ? "en" : "dis");
+ if (tsens_version(priv) > VER_1_X)
+ tsens_set_interrupt_v2(priv, hw_id, irq_type, enable);
+ else
+ tsens_set_interrupt_v1(priv, hw_id, irq_type, enable);
+}
+
+/**
+ * tsens_threshold_violated - Check if a sensor temperature violated a preset threshold
+ * @priv: Pointer to tsens controller private data
+ * @hw_id: Hardware ID aka. sensor number
+ * @d: Pointer to irq state data
+ *
+ * Return: 0 if threshold was not violated, 1 if it was violated and negative
+ * errno in case of errors
+ */
+static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id,
+ struct tsens_irq_data *d)
{
- struct tsens_sensor *s = &priv->sensor[i];
- u32 temp_idx = LAST_TEMP_0 + s->hw_id;
- u32 valid_idx = VALID_0 + s->hw_id;
- u32 last_temp = 0, valid, mask;
+ int ret;
+
+ ret = regmap_field_read(priv->rf[UPPER_STATUS_0 + hw_id], &d->up_viol);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol);
+ if (ret)
+ return ret;
+ if (d->up_viol || d->low_viol)
+ return 1;
+
+ return 0;
+}
+
+static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
+ struct tsens_sensor *s, struct tsens_irq_data *d)
+{
+ int ret;
+
+ ret = regmap_field_read(priv->rf[UP_INT_CLEAR_0 + hw_id], &d->up_irq_clear);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[LOW_INT_CLEAR_0 + hw_id], &d->low_irq_clear);
+ if (ret)
+ return ret;
+ if (tsens_version(priv) > VER_1_X) {
+ ret = regmap_field_read(priv->rf[UP_INT_MASK_0 + hw_id], &d->up_irq_mask);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask);
+ if (ret)
+ return ret;
+ } else {
+ /* No mask register on older TSENS */
+ d->up_irq_mask = 0;
+ d->low_irq_mask = 0;
+ }
+
+ d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id);
+ d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id);
+
+ dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u) | clr(%u|%u) | mask(%u|%u)\n",
+ hw_id, __func__, (d->up_viol || d->low_viol) ? "(V)" : "",
+ d->low_viol, d->up_viol, d->low_irq_clear, d->up_irq_clear,
+ d->low_irq_mask, d->up_irq_mask);
+ dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d)\n", hw_id, __func__,
+ (d->up_viol || d->low_viol) ? "(violation)" : "",
+ d->low_thresh, d->up_thresh);
+
+ return 0;
+}
+
+static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
+{
+ if (ver > VER_1_X)
+ return mask & (1 << hw_id);
+
+ /* v1, v0.1 don't have an irq mask register */
+ return 0;
+}
+
+/**
+ * tsens_irq_thread - Threaded interrupt handler for uplow interrupts
+ * @irq: irq number
+ * @data: tsens controller private data
+ *
+ * Check all sensors to find ones that violated their threshold limits. If the
+ * temperature is still outside the limits, call thermal_zone_device_update() to
+ * update the thresholds, else re-enable the interrupts.
+ *
+ * The level-triggered interrupt might deassert if the temperature returned to
+ * within the threshold limits by the time the handler got scheduled. We
+ * consider the irq to have been handled in that case.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t tsens_irq_thread(int irq, void *data)
+{
+ struct tsens_priv *priv = data;
+ struct tsens_irq_data d;
+ bool enable = true, disable = false;
+ unsigned long flags;
+ int temp, ret, i;
+
+ for (i = 0; i < priv->num_sensors; i++) {
+ bool trigger = false;
+ struct tsens_sensor *s = &priv->sensor[i];
+ u32 hw_id = s->hw_id;
+
+ if (IS_ERR(priv->sensor[i].tzd))
+ continue;
+ if (!tsens_threshold_violated(priv, hw_id, &d))
+ continue;
+ ret = get_temp_tsens_valid(s, &temp);
+ if (ret) {
+ dev_err(priv->dev, "[%u] %s: error reading sensor\n", hw_id, __func__);
+ continue;
+ }
+
+ spin_lock_irqsave(&priv->ul_lock, flags);
+
+ tsens_read_irq_state(priv, hw_id, s, &d);
+
+ if (d.up_viol &&
+ !masked_irq(hw_id, d.up_irq_mask, tsens_version(priv))) {
+ tsens_set_interrupt(priv, hw_id, UPPER, disable);
+ if (d.up_thresh > temp) {
+ dev_dbg(priv->dev, "[%u] %s: re-arm upper\n",
+ priv->sensor[i].hw_id, __func__);
+ tsens_set_interrupt(priv, hw_id, UPPER, enable);
+ } else {
+ trigger = true;
+ /* Keep irq masked */
+ }
+ } else if (d.low_viol &&
+ !masked_irq(hw_id, d.low_irq_mask, tsens_version(priv))) {
+ tsens_set_interrupt(priv, hw_id, LOWER, disable);
+ if (d.low_thresh < temp) {
+ dev_dbg(priv->dev, "[%u] %s: re-arm low\n",
+ priv->sensor[i].hw_id, __func__);
+ tsens_set_interrupt(priv, hw_id, LOWER, enable);
+ } else {
+ trigger = true;
+ /* Keep irq masked */
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->ul_lock, flags);
+
+ if (trigger) {
+ dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n",
+ hw_id, __func__, temp);
+ thermal_zone_device_update(priv->sensor[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ } else {
+ dev_dbg(priv->dev, "[%u] %s: no violation: %d\n",
+ hw_id, __func__, temp);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int tsens_set_trips(void *_sensor, int low, int high)
+{
+ struct tsens_sensor *s = _sensor;
+ struct tsens_priv *priv = s->priv;
+ struct device *dev = priv->dev;
+ struct tsens_irq_data d;
+ unsigned long flags;
+ int high_val, low_val, cl_high, cl_low;
+ u32 hw_id = s->hw_id;
+
+ dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n",
+ hw_id, __func__, low, high);
+
+ cl_high = clamp_val(high, -40000, 120000);
+ cl_low = clamp_val(low, -40000, 120000);
+
+ high_val = tsens_mC_to_hw(s, cl_high);
+ low_val = tsens_mC_to_hw(s, cl_low);
+
+ spin_lock_irqsave(&priv->ul_lock, flags);
+
+ tsens_read_irq_state(priv, hw_id, s, &d);
+
+ /* Write the new thresholds and clear the status */
+ regmap_field_write(priv->rf[LOW_THRESH_0 + hw_id], low_val);
+ regmap_field_write(priv->rf[UP_THRESH_0 + hw_id], high_val);
+ tsens_set_interrupt(priv, hw_id, LOWER, true);
+ tsens_set_interrupt(priv, hw_id, UPPER, true);
+
+ spin_unlock_irqrestore(&priv->ul_lock, flags);
+
+ dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n",
+ s->hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high);
+
+ return 0;
+}
+
+int tsens_enable_irq(struct tsens_priv *priv)
+{
+ int ret;
+ int val = tsens_version(priv) > VER_1_X ? 7 : 1;
+
+ ret = regmap_field_write(priv->rf[INT_EN], val);
+ if (ret < 0)
+ dev_err(priv->dev, "%s: failed to enable interrupts\n", __func__);
+
+ return ret;
+}
+
+void tsens_disable_irq(struct tsens_priv *priv)
+{
+ regmap_field_write(priv->rf[INT_EN], 0);
+}
+
+int get_temp_tsens_valid(struct tsens_sensor *s, int *temp)
+{
+ struct tsens_priv *priv = s->priv;
+ int hw_id = s->hw_id;
+ u32 temp_idx = LAST_TEMP_0 + hw_id;
+ u32 valid_idx = VALID_0 + hw_id;
+ u32 valid;
int ret;
ret = regmap_field_read(priv->rf[valid_idx], &valid);
@@ -106,29 +481,18 @@ int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp)
}
/* Valid bit is set, OK to read the temperature */
- ret = regmap_field_read(priv->rf[temp_idx], &last_temp);
- if (ret)
- return ret;
-
- if (priv->feat->adc) {
- /* Convert temperature from ADC code to milliCelsius */
- *temp = code_to_degc(last_temp, s) * 1000;
- } else {
- mask = GENMASK(priv->fields[LAST_TEMP_0].msb,
- priv->fields[LAST_TEMP_0].lsb);
- /* Convert temperature from deciCelsius to milliCelsius */
- *temp = sign_extend32(last_temp, fls(mask) - 1) * 100;
- }
+ *temp = tsens_hw_to_mC(s, temp_idx);
return 0;
}
-int get_temp_common(struct tsens_priv *priv, int i, int *temp)
+int get_temp_common(struct tsens_sensor *s, int *temp)
{
- struct tsens_sensor *s = &priv->sensor[i];
+ struct tsens_priv *priv = s->priv;
+ int hw_id = s->hw_id;
int last_temp = 0, ret;
- ret = regmap_field_read(priv->rf[LAST_TEMP_0 + s->hw_id], &last_temp);
+ ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp);
if (ret)
return ret;
@@ -137,6 +501,77 @@ int get_temp_common(struct tsens_priv *priv, int i, int *temp)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static int dbg_sensors_show(struct seq_file *s, void *data)
+{
+ struct platform_device *pdev = s->private;
+ struct tsens_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ seq_printf(s, "max: %2d\nnum: %2d\n\n",
+ priv->feat->max_sensors, priv->num_sensors);
+
+ seq_puts(s, " id slope offset\n--------------------------\n");
+ for (i = 0; i < priv->num_sensors; i++) {
+ seq_printf(s, "%8d %8d %8d\n", priv->sensor[i].hw_id,
+ priv->sensor[i].slope, priv->sensor[i].offset);
+ }
+
+ return 0;
+}
+
+static int dbg_version_show(struct seq_file *s, void *data)
+{
+ struct platform_device *pdev = s->private;
+ struct tsens_priv *priv = platform_get_drvdata(pdev);
+ u32 maj_ver, min_ver, step_ver;
+ int ret;
+
+ if (tsens_version(priv) > VER_0_1) {
+ ret = regmap_field_read(priv->rf[VER_MAJOR], &maj_ver);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[VER_MINOR], &min_ver);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[VER_STEP], &step_ver);
+ if (ret)
+ return ret;
+ seq_printf(s, "%d.%d.%d\n", maj_ver, min_ver, step_ver);
+ } else {
+ seq_puts(s, "0.1.0\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dbg_version);
+DEFINE_SHOW_ATTRIBUTE(dbg_sensors);
+
+static void tsens_debug_init(struct platform_device *pdev)
+{
+ struct tsens_priv *priv = platform_get_drvdata(pdev);
+ struct dentry *root, *file;
+
+ root = debugfs_lookup("tsens", NULL);
+ if (!root)
+ priv->debug_root = debugfs_create_dir("tsens", NULL);
+ else
+ priv->debug_root = root;
+
+ file = debugfs_lookup("version", priv->debug_root);
+ if (!file)
+ debugfs_create_file("version", 0444, priv->debug_root,
+ pdev, &dbg_version_fops);
+
+ /* A directory for each instance of the TSENS IP */
+ priv->debug = debugfs_create_dir(dev_name(&pdev->dev), priv->debug_root);
+ debugfs_create_file("sensors", 0444, priv->debug, pdev, &dbg_sensors_fops);
+}
+#else
+static inline void tsens_debug_init(struct platform_device *pdev) {}
+#endif
+
static const struct regmap_config tsens_config = {
.name = "tm",
.reg_bits = 32,
@@ -197,6 +632,15 @@ int __init init_common(struct tsens_priv *priv)
goto err_put_device;
}
+ if (tsens_version(priv) > VER_0_1) {
+ for (i = VER_MAJOR; i <= VER_STEP; i++) {
+ priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[i]);
+ if (IS_ERR(priv->rf[i]))
+ return PTR_ERR(priv->rf[i]);
+ }
+ }
+
priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
priv->fields[TSENS_EN]);
if (IS_ERR(priv->rf[TSENS_EN])) {
@@ -207,7 +651,7 @@ int __init init_common(struct tsens_priv *priv)
if (ret)
goto err_put_device;
if (!enabled) {
- dev_err(dev, "tsens device is not enabled\n");
+ dev_err(dev, "%s: device not enabled\n", __func__);
ret = -ENODEV;
goto err_put_device;
}
@@ -218,24 +662,31 @@ int __init init_common(struct tsens_priv *priv)
ret = PTR_ERR(priv->rf[SENSOR_EN]);
goto err_put_device;
}
- /* now alloc regmap_fields in tm_map */
- for (i = 0, j = LAST_TEMP_0; i < priv->feat->max_sensors; i++, j++) {
- priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
- priv->fields[j]);
- if (IS_ERR(priv->rf[j])) {
- ret = PTR_ERR(priv->rf[j]);
- goto err_put_device;
- }
+ priv->rf[INT_EN] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[INT_EN]);
+ if (IS_ERR(priv->rf[INT_EN])) {
+ ret = PTR_ERR(priv->rf[INT_EN]);
+ goto err_put_device;
}
- for (i = 0, j = VALID_0; i < priv->feat->max_sensors; i++, j++) {
- priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
- priv->fields[j]);
- if (IS_ERR(priv->rf[j])) {
- ret = PTR_ERR(priv->rf[j]);
- goto err_put_device;
+
+ /* This loop might need changes if enum regfield_ids is reordered */
+ for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) {
+ for (i = 0; i < priv->feat->max_sensors; i++) {
+ int idx = j + i;
+
+ priv->rf[idx] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[idx]);
+ if (IS_ERR(priv->rf[idx])) {
+ ret = PTR_ERR(priv->rf[idx]);
+ goto err_put_device;
+ }
}
}
+ spin_lock_init(&priv->ul_lock);
+ tsens_enable_irq(priv);
+ tsens_debug_init(op);
+
return 0;
err_put_device:
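A hedged restatement of the interleaved allocation loop added above
(alloc_field() is a hypothetical shorthand for the devm_regmap_field_alloc()
call): the enum in tsens.h lays out per-sensor IDs in contiguous blocks of
16, from LAST_TEMP_0..LAST_TEMP_15 through UP_THRESH_0..UP_THRESH_15, so 'j'
steps between block bases and 'j + i' picks sensor i within a block. This is
why the header now warns against reordering enum regfield_ids.

for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16)	/* next 16-ID block */
	for (i = 0; i < priv->feat->max_sensors; i++)
		alloc_field(priv, j + i);		/* sensor i of block j */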
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 055647bcee67..4b8dd6de02ce 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -347,9 +347,20 @@ static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
/* INTERRUPT ENABLE */
[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+ /* UPPER/LOWER TEMPERATURE THRESHOLDS */
+ REG_FIELD_FOR_EACH_SENSOR11(LOW_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 0, 9),
+ REG_FIELD_FOR_EACH_SENSOR11(UP_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 10, 19),
+
+ /* UPPER/LOWER INTERRUPTS [CLEAR/STATUS] */
+ REG_FIELD_FOR_EACH_SENSOR11(LOW_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 20, 20),
+ REG_FIELD_FOR_EACH_SENSOR11(UP_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 21, 21),
+
+ /* NO CRITICAL INTERRUPT SUPPORT on v0.1 */
+
/* Sn_STATUS */
REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
/* No VALID field on v0.1 */
+ /* xxx_STATUS bits: 1 == threshold violated */
REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
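For reference, a hedged expansion of one initializer above, assuming the
same 4-byte register stride that the SENSOR16 variant in tsens.h spells out:

/* REG_FIELD_FOR_EACH_SENSOR11(LOW_THRESH, off, 0, 9) expands to: */
[LOW_THRESH_0]  = REG_FIELD(off + 0,  0, 9),
[LOW_THRESH_1]  = REG_FIELD(off + 4,  0, 9),
/* ... */
[LOW_THRESH_10] = REG_FIELD(off + 40, 0, 9),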
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 870f502f2cb6..bd2ddb684a45 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include "tsens.h"
/* ----- SROT ------ */
@@ -17,6 +18,70 @@
#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
#define TM_Sn_STATUS_OFF 0x0044
#define TM_TRDY_OFF 0x0084
+#define TM_HIGH_LOW_INT_STATUS_OFF 0x0088
+#define TM_HIGH_LOW_Sn_INT_THRESHOLD_OFF 0x0090
+
+/* eeprom layout data for msm8956/76 (v1) */
+#define MSM8976_BASE0_MASK 0xff
+#define MSM8976_BASE1_MASK 0xff
+#define MSM8976_BASE1_SHIFT 8
+
+#define MSM8976_S0_P1_MASK 0x3f00
+#define MSM8976_S1_P1_MASK 0x3f00000
+#define MSM8976_S2_P1_MASK 0x3f
+#define MSM8976_S3_P1_MASK 0x3f000
+#define MSM8976_S4_P1_MASK 0x3f00
+#define MSM8976_S5_P1_MASK 0x3f00000
+#define MSM8976_S6_P1_MASK 0x3f
+#define MSM8976_S7_P1_MASK 0x3f000
+#define MSM8976_S8_P1_MASK 0x1f8
+#define MSM8976_S9_P1_MASK 0x1f8000
+#define MSM8976_S10_P1_MASK 0xf8000000
+#define MSM8976_S10_P1_MASK_1 0x1
+
+#define MSM8976_S0_P2_MASK 0xfc000
+#define MSM8976_S1_P2_MASK 0xfc000000
+#define MSM8976_S2_P2_MASK 0xfc0
+#define MSM8976_S3_P2_MASK 0xfc0000
+#define MSM8976_S4_P2_MASK 0xfc000
+#define MSM8976_S5_P2_MASK 0xfc000000
+#define MSM8976_S6_P2_MASK 0xfc0
+#define MSM8976_S7_P2_MASK 0xfc0000
+#define MSM8976_S8_P2_MASK 0x7e00
+#define MSM8976_S9_P2_MASK 0x7e00000
+#define MSM8976_S10_P2_MASK 0x7e
+
+#define MSM8976_S0_P1_SHIFT 8
+#define MSM8976_S1_P1_SHIFT 20
+#define MSM8976_S2_P1_SHIFT 0
+#define MSM8976_S3_P1_SHIFT 12
+#define MSM8976_S4_P1_SHIFT 8
+#define MSM8976_S5_P1_SHIFT 20
+#define MSM8976_S6_P1_SHIFT 0
+#define MSM8976_S7_P1_SHIFT 12
+#define MSM8976_S8_P1_SHIFT 3
+#define MSM8976_S9_P1_SHIFT 15
+#define MSM8976_S10_P1_SHIFT 27
+#define MSM8976_S10_P1_SHIFT_1 0
+
+#define MSM8976_S0_P2_SHIFT 14
+#define MSM8976_S1_P2_SHIFT 26
+#define MSM8976_S2_P2_SHIFT 6
+#define MSM8976_S3_P2_SHIFT 18
+#define MSM8976_S4_P2_SHIFT 14
+#define MSM8976_S5_P2_SHIFT 26
+#define MSM8976_S6_P2_SHIFT 6
+#define MSM8976_S7_P2_SHIFT 18
+#define MSM8976_S8_P2_SHIFT 9
+#define MSM8976_S9_P2_SHIFT 21
+#define MSM8976_S10_P2_SHIFT 1
+
+#define MSM8976_CAL_SEL_MASK 0x3
+
+#define MSM8976_CAL_DEGC_PT1 30
+#define MSM8976_CAL_DEGC_PT2 120
+#define MSM8976_SLOPE_FACTOR 1000
+#define MSM8976_SLOPE_DEFAULT 3200
/* eeprom layout data for qcs404/405 (v1) */
#define BASE0_MASK 0x000007f8
@@ -77,6 +142,30 @@
#define CAL_SEL_MASK 7
#define CAL_SEL_SHIFT 0
+static void compute_intercept_slope_8976(struct tsens_priv *priv,
+ u32 *p1, u32 *p2, u32 mode)
+{
+ int i;
+
+ priv->sensor[0].slope = 3313;
+ priv->sensor[1].slope = 3275;
+ priv->sensor[2].slope = 3320;
+ priv->sensor[3].slope = 3246;
+ priv->sensor[4].slope = 3279;
+ priv->sensor[5].slope = 3257;
+ priv->sensor[6].slope = 3234;
+ priv->sensor[7].slope = 3269;
+ priv->sensor[8].slope = 3255;
+ priv->sensor[9].slope = 3239;
+ priv->sensor[10].slope = 3286;
+
+ for (i = 0; i < priv->num_sensors; i++) {
+ priv->sensor[i].offset = (p1[i] * MSM8976_SLOPE_FACTOR) -
+ (MSM8976_CAL_DEGC_PT1 *
+ priv->sensor[i].slope);
+ }
+}
+
static int calibrate_v1(struct tsens_priv *priv)
{
u32 base0 = 0, base1 = 0;
@@ -143,7 +232,72 @@ static int calibrate_v1(struct tsens_priv *priv)
return 0;
}
-/* v1.x: qcs404,405 */
+static int calibrate_8976(struct tsens_priv *priv)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[11], p2[11];
+ int mode = 0, tmp = 0;
+ u32 *qfprom_cdata;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ mode = (qfprom_cdata[4] & MSM8976_CAL_SEL_MASK);
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[2] & MSM8976_BASE1_MASK) >> MSM8976_BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & MSM8976_S0_P2_MASK) >> MSM8976_S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & MSM8976_S1_P2_MASK) >> MSM8976_S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & MSM8976_S2_P2_MASK) >> MSM8976_S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & MSM8976_S3_P2_MASK) >> MSM8976_S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[2] & MSM8976_S4_P2_MASK) >> MSM8976_S4_P2_SHIFT;
+ p2[5] = (qfprom_cdata[2] & MSM8976_S5_P2_MASK) >> MSM8976_S5_P2_SHIFT;
+ p2[6] = (qfprom_cdata[3] & MSM8976_S6_P2_MASK) >> MSM8976_S6_P2_SHIFT;
+ p2[7] = (qfprom_cdata[3] & MSM8976_S7_P2_MASK) >> MSM8976_S7_P2_SHIFT;
+ p2[8] = (qfprom_cdata[4] & MSM8976_S8_P2_MASK) >> MSM8976_S8_P2_SHIFT;
+ p2[9] = (qfprom_cdata[4] & MSM8976_S9_P2_MASK) >> MSM8976_S9_P2_SHIFT;
+ p2[10] = (qfprom_cdata[5] & MSM8976_S10_P2_MASK) >> MSM8976_S10_P2_SHIFT;
+
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 2);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = qfprom_cdata[0] & MSM8976_BASE0_MASK;
+ p1[0] = (qfprom_cdata[0] & MSM8976_S0_P1_MASK) >> MSM8976_S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & MSM8976_S1_P1_MASK) >> MSM8976_S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[1] & MSM8976_S2_P1_MASK) >> MSM8976_S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & MSM8976_S3_P1_MASK) >> MSM8976_S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[2] & MSM8976_S4_P1_MASK) >> MSM8976_S4_P1_SHIFT;
+ p1[5] = (qfprom_cdata[2] & MSM8976_S5_P1_MASK) >> MSM8976_S5_P1_SHIFT;
+ p1[6] = (qfprom_cdata[3] & MSM8976_S6_P1_MASK) >> MSM8976_S6_P1_SHIFT;
+ p1[7] = (qfprom_cdata[3] & MSM8976_S7_P1_MASK) >> MSM8976_S7_P1_SHIFT;
+ p1[8] = (qfprom_cdata[4] & MSM8976_S8_P1_MASK) >> MSM8976_S8_P1_SHIFT;
+ p1[9] = (qfprom_cdata[4] & MSM8976_S9_P1_MASK) >> MSM8976_S9_P1_SHIFT;
+ p1[10] = (qfprom_cdata[4] & MSM8976_S10_P1_MASK) >> MSM8976_S10_P1_SHIFT;
+ tmp = (qfprom_cdata[5] & MSM8976_S10_P1_MASK_1) << MSM8976_S10_P1_SHIFT_1;
+ p1[10] |= tmp;
+
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 2);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope_8976(priv, p1, p2, mode);
+ kfree(qfprom_cdata);
+
+ return 0;
+}
+
+/* v1.x: msm8956,8976,qcs404,405 */
static const struct tsens_features tsens_v1_feat = {
.ver_major = VER_1_X,
@@ -168,9 +322,36 @@ static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
/* INTERRUPT ENABLE */
[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+ /* UPPER/LOWER TEMPERATURE THRESHOLDS */
+ REG_FIELD_FOR_EACH_SENSOR11(LOW_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 0, 9),
+ REG_FIELD_FOR_EACH_SENSOR11(UP_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 10, 19),
+
+ /* UPPER/LOWER INTERRUPTS [CLEAR/STATUS] */
+ REG_FIELD_FOR_EACH_SENSOR11(LOW_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 20, 20),
+ REG_FIELD_FOR_EACH_SENSOR11(UP_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 21, 21),
+ [LOW_INT_STATUS_0] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 0, 0),
+ [LOW_INT_STATUS_1] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 1, 1),
+ [LOW_INT_STATUS_2] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 2, 2),
+ [LOW_INT_STATUS_3] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 3, 3),
+ [LOW_INT_STATUS_4] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 4, 4),
+ [LOW_INT_STATUS_5] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 5, 5),
+ [LOW_INT_STATUS_6] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 6, 6),
+ [LOW_INT_STATUS_7] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 7, 7),
+ [UP_INT_STATUS_0] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 8, 8),
+ [UP_INT_STATUS_1] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 9, 9),
+ [UP_INT_STATUS_2] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 10, 10),
+ [UP_INT_STATUS_3] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 11, 11),
+ [UP_INT_STATUS_4] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 12, 12),
+ [UP_INT_STATUS_5] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 13, 13),
+ [UP_INT_STATUS_6] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 14, 14),
+ [UP_INT_STATUS_7] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 15, 15),
+
+ /* NO CRITICAL INTERRUPT SUPPORT on v1 */
+
/* Sn_STATUS */
REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
REG_FIELD_FOR_EACH_SENSOR11(VALID, TM_Sn_STATUS_OFF, 14, 14),
+ /* xxx_STATUS bits: 1 == threshold violated */
REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
@@ -192,3 +373,18 @@ const struct tsens_plat_data data_tsens_v1 = {
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
+
+static const struct tsens_ops ops_8976 = {
+ .init = init_common,
+ .calibrate = calibrate_8976,
+ .get_temp = get_temp_tsens_valid,
+};
+
+/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. */
+const struct tsens_plat_data data_8976 = {
+ .num_sensors = 11,
+ .ops = &ops_8976,
+ .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10},
+ .feat = &tsens_v1_feat,
+ .fields = tsens_v1_regfields,
+};
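A hedged numeric walk-through of the one-point calibration path above, with
made-up fuse values (base0 = 40, sensor 0's P1 field = 10):

p1[0]  = (40 + 10) << 2;			/* = 200        */
offset = p1[0] * MSM8976_SLOPE_FACTOR		/*   200 * 1000 */
	 - MSM8976_CAL_DEGC_PT1 * 3313;		/* - 30 * 3313  */
						/* = 100610     */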
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 0a4f2b8fcab6..a4d15e1abfdd 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -50,9 +50,22 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
/* v2 has separate enables for UPPER/LOWER/CRITICAL interrupts */
[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),
+ /* TEMPERATURE THRESHOLDS */
+ REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11),
+ REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23),
+
+ /* INTERRUPTS [CLEAR/STATUS/MASK] */
+ REG_FIELD_SPLIT_BITS_0_15(LOW_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(LOW_INT_CLEAR, TM_UPPER_LOWER_INT_CLEAR_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(LOW_INT_MASK, TM_UPPER_LOWER_INT_MASK_OFF),
+ REG_FIELD_SPLIT_BITS_16_31(UP_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF),
+ REG_FIELD_SPLIT_BITS_16_31(UP_INT_CLEAR, TM_UPPER_LOWER_INT_CLEAR_OFF),
+ REG_FIELD_SPLIT_BITS_16_31(UP_INT_MASK, TM_UPPER_LOWER_INT_MASK_OFF),
+
/* Sn_STATUS */
REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11),
REG_FIELD_FOR_EACH_SENSOR16(VALID, TM_Sn_STATUS_OFF, 21, 21),
+ /* xxx_STATUS bits: 1 == threshold violated */
REG_FIELD_FOR_EACH_SENSOR16(MIN_STATUS, TM_Sn_STATUS_OFF, 16, 16),
REG_FIELD_FOR_EACH_SENSOR16(LOWER_STATUS, TM_Sn_STATUS_OFF, 17, 17),
REG_FIELD_FOR_EACH_SENSOR16(UPPER_STATUS, TM_Sn_STATUS_OFF, 18, 18),
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 0627d8615c30..015e7d201598 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -3,9 +3,11 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -14,19 +16,19 @@
static int tsens_get_temp(void *data, int *temp)
{
- const struct tsens_sensor *s = data;
+ struct tsens_sensor *s = data;
struct tsens_priv *priv = s->priv;
- return priv->ops->get_temp(priv, s->id, temp);
+ return priv->ops->get_temp(s, temp);
}
static int tsens_get_trend(void *data, int trip, enum thermal_trend *trend)
{
- const struct tsens_sensor *s = data;
+ struct tsens_sensor *s = data;
struct tsens_priv *priv = s->priv;
if (priv->ops->get_trend)
- return priv->ops->get_trend(priv, s->id, trend);
+ return priv->ops->get_trend(s, trend);
return -ENOTSUPP;
}
@@ -61,6 +63,9 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,msm8974-tsens",
.data = &data_8974,
}, {
+ .compatible = "qcom,msm8976-tsens",
+ .data = &data_8976,
+ }, {
.compatible = "qcom,msm8996-tsens",
.data = &data_8996,
}, {
@@ -77,17 +82,18 @@ MODULE_DEVICE_TABLE(of, tsens_table);
static const struct thermal_zone_of_device_ops tsens_of_ops = {
.get_temp = tsens_get_temp,
.get_trend = tsens_get_trend,
+ .set_trips = tsens_set_trips,
};
static int tsens_register(struct tsens_priv *priv)
{
- int i;
+ int i, ret, irq;
struct thermal_zone_device *tzd;
+ struct platform_device *pdev;
for (i = 0; i < priv->num_sensors; i++) {
priv->sensor[i].priv = priv;
- priv->sensor[i].id = i;
- tzd = devm_thermal_zone_of_sensor_register(priv->dev, i,
+ tzd = devm_thermal_zone_of_sensor_register(priv->dev, priv->sensor[i].hw_id,
&priv->sensor[i],
&tsens_of_ops);
if (IS_ERR(tzd))
@@ -96,7 +102,31 @@ static int tsens_register(struct tsens_priv *priv)
if (priv->ops->enable)
priv->ops->enable(priv, i);
}
- return 0;
+
+ pdev = of_find_device_by_node(priv->dev->of_node);
+ if (!pdev)
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "uplow");
+ if (irq < 0) {
+ ret = irq;
+ goto err_put_device;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, tsens_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(&pdev->dev), priv);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to get irq\n", __func__);
+ goto err_put_device;
+ }
+
+ enable_irq_wake(irq);
+
+err_put_device:
+ put_device(&pdev->dev);
+ return ret;
}
static int tsens_probe(struct platform_device *pdev)
@@ -128,7 +158,7 @@ static int tsens_probe(struct platform_device *pdev)
of_property_read_u32(np, "#qcom,sensors", &num_sensors);
if (num_sensors <= 0) {
- dev_err(dev, "invalid number of sensors\n");
+ dev_err(dev, "%s: invalid number of sensors\n", __func__);
return -EINVAL;
}
@@ -150,12 +180,14 @@ static int tsens_probe(struct platform_device *pdev)
priv->feat = data->feat;
priv->fields = data->fields;
+ platform_set_drvdata(pdev, priv);
+
if (!priv->ops || !priv->ops->init || !priv->ops->get_temp)
return -EINVAL;
ret = priv->ops->init(priv);
if (ret < 0) {
- dev_err(dev, "tsens init failed\n");
+ dev_err(dev, "%s: init failed\n", __func__);
return ret;
}
@@ -163,22 +195,20 @@ static int tsens_probe(struct platform_device *pdev)
ret = priv->ops->calibrate(priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
- dev_err(dev, "tsens calibration failed\n");
+ dev_err(dev, "%s: calibration failed\n", __func__);
return ret;
}
}
- ret = tsens_register(priv);
-
- platform_set_drvdata(pdev, priv);
-
- return ret;
+ return tsens_register(priv);
}
static int tsens_remove(struct platform_device *pdev)
{
struct tsens_priv *priv = platform_get_drvdata(pdev);
+ debugfs_remove_recursive(priv->debug_root);
+ tsens_disable_irq(priv);
if (priv->ops->disable)
priv->ops->disable(priv);
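One hedged annotation of the request made in tsens_register() above
(arguments as in the diff):

ret = devm_request_threaded_irq(&pdev->dev, irq,
				NULL,			/* no hard handler  */
				tsens_irq_thread,	/* threaded handler */
				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				dev_name(&pdev->dev), priv);
/* With a NULL hard handler, genirq rejects the request unless IRQF_ONESHOT
 * is set; the line stays masked until tsens_irq_thread() returns, which is
 * what lets the thread rewrite the per-sensor mask/clear registers safely. */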
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index b89083b61c38..e24a865fbc34 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -13,8 +13,10 @@
#define CAL_DEGC_PT2 120
#define SLOPE_FACTOR 1000
#define SLOPE_DEFAULT 3200
+#define THRESHOLD_MAX_ADC_CODE 0x3ff
+#define THRESHOLD_MIN_ADC_CODE 0x0
-
+#include <linux/interrupt.h>
#include <linux/thermal.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -27,12 +29,16 @@ enum tsens_ver {
VER_2_X,
};
+enum tsens_irq_type {
+ LOWER,
+ UPPER,
+};
+
/**
* struct tsens_sensor - data for each sensor connected to the tsens device
* @priv: tsens device instance that this sensor is connected to
* @tzd: pointer to the thermal zone that this sensor is in
* @offset: offset of temperature adjustment curve
- * @id: Sensor ID
* @hw_id: HW ID can be used in case of platform-specific IDs
* @slope: slope of temperature adjustment curve
* @status: 8960-specific variable to track 8960 and 8660 status register offset
@@ -41,7 +47,6 @@ struct tsens_sensor {
struct tsens_priv *priv;
struct thermal_zone_device *tzd;
int offset;
- unsigned int id;
unsigned int hw_id;
int slope;
u32 status;
@@ -62,13 +67,13 @@ struct tsens_ops {
/* mandatory callbacks */
int (*init)(struct tsens_priv *priv);
int (*calibrate)(struct tsens_priv *priv);
- int (*get_temp)(struct tsens_priv *priv, int i, int *temp);
+ int (*get_temp)(struct tsens_sensor *s, int *temp);
/* optional callbacks */
int (*enable)(struct tsens_priv *priv, int i);
void (*disable)(struct tsens_priv *priv);
int (*suspend)(struct tsens_priv *priv);
int (*resume)(struct tsens_priv *priv);
- int (*get_trend)(struct tsens_priv *priv, int i, enum thermal_trend *trend);
+ int (*get_trend)(struct tsens_sensor *s, enum thermal_trend *trend);
};
#define REG_FIELD_FOR_EACH_SENSOR11(_name, _offset, _startbit, _stopbit) \
@@ -102,22 +107,66 @@ struct tsens_ops {
[_name##_##14] = REG_FIELD(_offset + 56, _startbit, _stopbit), \
[_name##_##15] = REG_FIELD(_offset + 60, _startbit, _stopbit)
-/* reg_field IDs to use as an index into an array */
+#define REG_FIELD_SPLIT_BITS_0_15(_name, _offset) \
+ [_name##_##0] = REG_FIELD(_offset, 0, 0), \
+ [_name##_##1] = REG_FIELD(_offset, 1, 1), \
+ [_name##_##2] = REG_FIELD(_offset, 2, 2), \
+ [_name##_##3] = REG_FIELD(_offset, 3, 3), \
+ [_name##_##4] = REG_FIELD(_offset, 4, 4), \
+ [_name##_##5] = REG_FIELD(_offset, 5, 5), \
+ [_name##_##6] = REG_FIELD(_offset, 6, 6), \
+ [_name##_##7] = REG_FIELD(_offset, 7, 7), \
+ [_name##_##8] = REG_FIELD(_offset, 8, 8), \
+ [_name##_##9] = REG_FIELD(_offset, 9, 9), \
+ [_name##_##10] = REG_FIELD(_offset, 10, 10), \
+ [_name##_##11] = REG_FIELD(_offset, 11, 11), \
+ [_name##_##12] = REG_FIELD(_offset, 12, 12), \
+ [_name##_##13] = REG_FIELD(_offset, 13, 13), \
+ [_name##_##14] = REG_FIELD(_offset, 14, 14), \
+ [_name##_##15] = REG_FIELD(_offset, 15, 15)
+
+#define REG_FIELD_SPLIT_BITS_16_31(_name, _offset) \
+ [_name##_##0] = REG_FIELD(_offset, 16, 16), \
+ [_name##_##1] = REG_FIELD(_offset, 17, 17), \
+ [_name##_##2] = REG_FIELD(_offset, 18, 18), \
+ [_name##_##3] = REG_FIELD(_offset, 19, 19), \
+ [_name##_##4] = REG_FIELD(_offset, 20, 20), \
+ [_name##_##5] = REG_FIELD(_offset, 21, 21), \
+ [_name##_##6] = REG_FIELD(_offset, 22, 22), \
+ [_name##_##7] = REG_FIELD(_offset, 23, 23), \
+ [_name##_##8] = REG_FIELD(_offset, 24, 24), \
+ [_name##_##9] = REG_FIELD(_offset, 25, 25), \
+ [_name##_##10] = REG_FIELD(_offset, 26, 26), \
+ [_name##_##11] = REG_FIELD(_offset, 27, 27), \
+ [_name##_##12] = REG_FIELD(_offset, 28, 28), \
+ [_name##_##13] = REG_FIELD(_offset, 29, 29), \
+ [_name##_##14] = REG_FIELD(_offset, 30, 30), \
+ [_name##_##15] = REG_FIELD(_offset, 31, 31)
+
+/*
+ * reg_field IDs to use as an index into an array
+ * If you change the order of the entries, check the devm_regmap_field_alloc()
+ * calls in init_common()
+ */
enum regfield_ids {
/* ----- SROT ------ */
/* HW_VER */
- VER_MAJOR = 0,
+ VER_MAJOR,
VER_MINOR,
VER_STEP,
/* CTRL_OFFSET */
- TSENS_EN = 3,
+ TSENS_EN,
TSENS_SW_RST,
SENSOR_EN,
CODE_OR_TEMP,
/* ----- TM ------ */
+ /* TRDY */
+ TRDY,
+ /* INTERRUPT ENABLE */
+ INT_EN, /* v2+ has separate enables for crit, upper and lower irq */
/* STATUS */
- LAST_TEMP_0 = 7, /* Last temperature reading */
+ LAST_TEMP_0, /* Last temperature reading */
LAST_TEMP_1,
LAST_TEMP_2,
LAST_TEMP_3,
@@ -133,7 +182,7 @@ enum regfield_ids {
LAST_TEMP_13,
LAST_TEMP_14,
LAST_TEMP_15,
- VALID_0 = 23, /* VALID reading or not */
+ VALID_0, /* VALID reading or not */
VALID_1,
VALID_2,
VALID_3,
@@ -149,38 +198,6 @@ enum regfield_ids {
VALID_13,
VALID_14,
VALID_15,
- MIN_STATUS_0, /* MIN threshold violated */
- MIN_STATUS_1,
- MIN_STATUS_2,
- MIN_STATUS_3,
- MIN_STATUS_4,
- MIN_STATUS_5,
- MIN_STATUS_6,
- MIN_STATUS_7,
- MIN_STATUS_8,
- MIN_STATUS_9,
- MIN_STATUS_10,
- MIN_STATUS_11,
- MIN_STATUS_12,
- MIN_STATUS_13,
- MIN_STATUS_14,
- MIN_STATUS_15,
- MAX_STATUS_0, /* MAX threshold violated */
- MAX_STATUS_1,
- MAX_STATUS_2,
- MAX_STATUS_3,
- MAX_STATUS_4,
- MAX_STATUS_5,
- MAX_STATUS_6,
- MAX_STATUS_7,
- MAX_STATUS_8,
- MAX_STATUS_9,
- MAX_STATUS_10,
- MAX_STATUS_11,
- MAX_STATUS_12,
- MAX_STATUS_13,
- MAX_STATUS_14,
- MAX_STATUS_15,
LOWER_STATUS_0, /* LOWER threshold violated */
LOWER_STATUS_1,
LOWER_STATUS_2,
@@ -197,6 +214,70 @@ enum regfield_ids {
LOWER_STATUS_13,
LOWER_STATUS_14,
LOWER_STATUS_15,
+ LOW_INT_STATUS_0, /* LOWER interrupt status */
+ LOW_INT_STATUS_1,
+ LOW_INT_STATUS_2,
+ LOW_INT_STATUS_3,
+ LOW_INT_STATUS_4,
+ LOW_INT_STATUS_5,
+ LOW_INT_STATUS_6,
+ LOW_INT_STATUS_7,
+ LOW_INT_STATUS_8,
+ LOW_INT_STATUS_9,
+ LOW_INT_STATUS_10,
+ LOW_INT_STATUS_11,
+ LOW_INT_STATUS_12,
+ LOW_INT_STATUS_13,
+ LOW_INT_STATUS_14,
+ LOW_INT_STATUS_15,
+ LOW_INT_CLEAR_0, /* LOWER interrupt clear */
+ LOW_INT_CLEAR_1,
+ LOW_INT_CLEAR_2,
+ LOW_INT_CLEAR_3,
+ LOW_INT_CLEAR_4,
+ LOW_INT_CLEAR_5,
+ LOW_INT_CLEAR_6,
+ LOW_INT_CLEAR_7,
+ LOW_INT_CLEAR_8,
+ LOW_INT_CLEAR_9,
+ LOW_INT_CLEAR_10,
+ LOW_INT_CLEAR_11,
+ LOW_INT_CLEAR_12,
+ LOW_INT_CLEAR_13,
+ LOW_INT_CLEAR_14,
+ LOW_INT_CLEAR_15,
+ LOW_INT_MASK_0, /* LOWER interrupt mask */
+ LOW_INT_MASK_1,
+ LOW_INT_MASK_2,
+ LOW_INT_MASK_3,
+ LOW_INT_MASK_4,
+ LOW_INT_MASK_5,
+ LOW_INT_MASK_6,
+ LOW_INT_MASK_7,
+ LOW_INT_MASK_8,
+ LOW_INT_MASK_9,
+ LOW_INT_MASK_10,
+ LOW_INT_MASK_11,
+ LOW_INT_MASK_12,
+ LOW_INT_MASK_13,
+ LOW_INT_MASK_14,
+ LOW_INT_MASK_15,
+ LOW_THRESH_0, /* LOWER threshold values */
+ LOW_THRESH_1,
+ LOW_THRESH_2,
+ LOW_THRESH_3,
+ LOW_THRESH_4,
+ LOW_THRESH_5,
+ LOW_THRESH_6,
+ LOW_THRESH_7,
+ LOW_THRESH_8,
+ LOW_THRESH_9,
+ LOW_THRESH_10,
+ LOW_THRESH_11,
+ LOW_THRESH_12,
+ LOW_THRESH_13,
+ LOW_THRESH_14,
+ LOW_THRESH_15,
UPPER_STATUS_0, /* UPPER threshold violated */
UPPER_STATUS_1,
UPPER_STATUS_2,
@@ -213,6 +294,70 @@ enum regfield_ids {
UPPER_STATUS_13,
UPPER_STATUS_14,
UPPER_STATUS_15,
+ UP_INT_STATUS_0, /* UPPER interrupt status */
+ UP_INT_STATUS_1,
+ UP_INT_STATUS_2,
+ UP_INT_STATUS_3,
+ UP_INT_STATUS_4,
+ UP_INT_STATUS_5,
+ UP_INT_STATUS_6,
+ UP_INT_STATUS_7,
+ UP_INT_STATUS_8,
+ UP_INT_STATUS_9,
+ UP_INT_STATUS_10,
+ UP_INT_STATUS_11,
+ UP_INT_STATUS_12,
+ UP_INT_STATUS_13,
+ UP_INT_STATUS_14,
+ UP_INT_STATUS_15,
+ UP_INT_CLEAR_0, /* UPPER interrupt clear */
+ UP_INT_CLEAR_1,
+ UP_INT_CLEAR_2,
+ UP_INT_CLEAR_3,
+ UP_INT_CLEAR_4,
+ UP_INT_CLEAR_5,
+ UP_INT_CLEAR_6,
+ UP_INT_CLEAR_7,
+ UP_INT_CLEAR_8,
+ UP_INT_CLEAR_9,
+ UP_INT_CLEAR_10,
+ UP_INT_CLEAR_11,
+ UP_INT_CLEAR_12,
+ UP_INT_CLEAR_13,
+ UP_INT_CLEAR_14,
+ UP_INT_CLEAR_15,
+ UP_INT_MASK_0, /* UPPER interrupt mask */
+ UP_INT_MASK_1,
+ UP_INT_MASK_2,
+ UP_INT_MASK_3,
+ UP_INT_MASK_4,
+ UP_INT_MASK_5,
+ UP_INT_MASK_6,
+ UP_INT_MASK_7,
+ UP_INT_MASK_8,
+ UP_INT_MASK_9,
+ UP_INT_MASK_10,
+ UP_INT_MASK_11,
+ UP_INT_MASK_12,
+ UP_INT_MASK_13,
+ UP_INT_MASK_14,
+ UP_INT_MASK_15,
+ UP_THRESH_0, /* UPPER threshold values */
+ UP_THRESH_1,
+ UP_THRESH_2,
+ UP_THRESH_3,
+ UP_THRESH_4,
+ UP_THRESH_5,
+ UP_THRESH_6,
+ UP_THRESH_7,
+ UP_THRESH_8,
+ UP_THRESH_9,
+ UP_THRESH_10,
+ UP_THRESH_11,
+ UP_THRESH_12,
+ UP_THRESH_13,
+ UP_THRESH_14,
+ UP_THRESH_15,
CRITICAL_STATUS_0, /* CRITICAL threshold violated */
CRITICAL_STATUS_1,
CRITICAL_STATUS_2,
@@ -229,13 +374,38 @@ enum regfield_ids {
CRITICAL_STATUS_13,
CRITICAL_STATUS_14,
CRITICAL_STATUS_15,
- /* TRDY */
- TRDY,
- /* INTERRUPT ENABLE */
- INT_EN, /* Pre-V1, V1.x */
- LOW_INT_EN, /* V2.x */
- UP_INT_EN, /* V2.x */
- CRIT_INT_EN, /* V2.x */
+ MIN_STATUS_0, /* MIN threshold violated */
+ MIN_STATUS_1,
+ MIN_STATUS_2,
+ MIN_STATUS_3,
+ MIN_STATUS_4,
+ MIN_STATUS_5,
+ MIN_STATUS_6,
+ MIN_STATUS_7,
+ MIN_STATUS_8,
+ MIN_STATUS_9,
+ MIN_STATUS_10,
+ MIN_STATUS_11,
+ MIN_STATUS_12,
+ MIN_STATUS_13,
+ MIN_STATUS_14,
+ MIN_STATUS_15,
+ MAX_STATUS_0, /* MAX threshold violated */
+ MAX_STATUS_1,
+ MAX_STATUS_2,
+ MAX_STATUS_3,
+ MAX_STATUS_4,
+ MAX_STATUS_5,
+ MAX_STATUS_6,
+ MAX_STATUS_7,
+ MAX_STATUS_8,
+ MAX_STATUS_9,
+ MAX_STATUS_10,
+ MAX_STATUS_11,
+ MAX_STATUS_12,
+ MAX_STATUS_13,
+ MAX_STATUS_14,
+ MAX_STATUS_15,
/* Keep last */
MAX_REGFIELDS
@@ -295,6 +465,8 @@ struct tsens_context {
* @feat: features of the IP
* @fields: bitfield locations
* @ops: pointer to list of callbacks supported by this device
+ * @debug_root: pointer to debugfs dentry for all tsens
+ * @debug: pointer to debugfs dentry for tsens controller
* @sensor: list of sensors attached to this device
*/
struct tsens_priv {
@@ -303,19 +475,31 @@ struct tsens_priv {
struct regmap *tm_map;
struct regmap *srot_map;
u32 tm_offset;
+
+ /* lock for upper/lower threshold interrupts */
+ spinlock_t ul_lock;
+
struct regmap_field *rf[MAX_REGFIELDS];
struct tsens_context ctx;
const struct tsens_features *feat;
const struct reg_field *fields;
const struct tsens_ops *ops;
+
+ struct dentry *debug_root;
+ struct dentry *debug;
+
struct tsens_sensor sensor[0];
};
char *qfprom_read(struct device *dev, const char *cname);
void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
int init_common(struct tsens_priv *priv);
-int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp);
-int get_temp_common(struct tsens_priv *priv, int i, int *temp);
+int get_temp_tsens_valid(struct tsens_sensor *s, int *temp);
+int get_temp_common(struct tsens_sensor *s, int *temp);
+int tsens_enable_irq(struct tsens_priv *priv);
+void tsens_disable_irq(struct tsens_priv *priv);
+int tsens_set_trips(void *_sensor, int low, int high);
+irqreturn_t tsens_irq_thread(int irq, void *data);
/* TSENS target */
extern const struct tsens_plat_data data_8960;
@@ -324,7 +508,7 @@ extern const struct tsens_plat_data data_8960;
extern const struct tsens_plat_data data_8916, data_8974;
/* TSENS v1 targets */
-extern const struct tsens_plat_data data_tsens_v1;
+extern const struct tsens_plat_data data_tsens_v1, data_8976;
/* TSENS v2 targets */
extern const struct tsens_plat_data data_8996, data_tsens_v2;
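A hedged reading of the SPLIT_BITS macros above: on v2 hardware the lower and
upper interrupt bits for all 16 sensors share one 32-bit register, so for
TM_UPPER_LOWER_INT_MASK_OFF, for example:

/* LOW_INT_MASK_n -> bit n,      n = 0..15 */
/* UP_INT_MASK_n  -> bit 16 + n, n = 0..15 */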
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 39542c670301..45e9fcb172cc 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -13,7 +13,16 @@
#include "thermal_core.h"
-#define SITES_MAX 16
+#define SITES_MAX 16
+#define TMR_DISABLE 0x0
+#define TMR_ME 0x80000000
+#define TMR_ALPF 0x0c000000
+#define TMR_ALPF_V2 0x03000000
+#define TMTMIR_DEFAULT 0x0000000f
+#define TIER_DISABLE 0x0
+#define TEUMR0_V2 0x51009c00
+#define TMU_VER1 0x1
+#define TMU_VER2 0x2
/*
* QorIQ TMU Registers
@@ -24,17 +33,12 @@ struct qoriq_tmu_site_regs {
u8 res0[0x8];
};
-struct qoriq_tmu_regs {
+struct qoriq_tmu_regs_v1 {
u32 tmr; /* Mode Register */
-#define TMR_DISABLE 0x0
-#define TMR_ME 0x80000000
-#define TMR_ALPF 0x0c000000
u32 tsr; /* Status Register */
u32 tmtmir; /* Temperature measurement interval Register */
-#define TMTMIR_DEFAULT 0x0000000f
u8 res0[0x14];
u32 tier; /* Interrupt Enable Register */
-#define TIER_DISABLE 0x0
u32 tidr; /* Interrupt Detect Register */
u32 tiscr; /* Interrupt Site Capture Register */
u32 ticscr; /* Interrupt Critical Site Capture Register */
@@ -54,10 +58,50 @@ struct qoriq_tmu_regs {
u32 ipbrr0; /* IP Block Revision Register 0 */
u32 ipbrr1; /* IP Block Revision Register 1 */
u8 res6[0x310];
- u32 ttr0cr; /* Temperature Range 0 Control Register */
- u32 ttr1cr; /* Temperature Range 1 Control Register */
- u32 ttr2cr; /* Temperature Range 2 Control Register */
- u32 ttr3cr; /* Temperature Range 3 Control Register */
+ u32 ttrcr[4]; /* Temperature Range Control Register */
+};
+
+struct qoriq_tmu_regs_v2 {
+ u32 tmr; /* Mode Register */
+ u32 tsr; /* Status Register */
+ u32 tmsr; /* monitor site register */
+ u32 tmtmir; /* Temperature measurement interval Register */
+ u8 res0[0x10];
+ u32 tier; /* Interrupt Enable Register */
+ u32 tidr; /* Interrupt Detect Register */
+ u8 res1[0x8];
+ u32 tiiscr; /* interrupt immediate site capture register */
+ u32 tiascr; /* interrupt average site capture register */
+ u32 ticscr; /* Interrupt Critical Site Capture Register */
+ u32 res2;
+ u32 tmhtcr; /* monitor high temperature capture register */
+ u32 tmltcr; /* monitor low temperature capture register */
+ u32 tmrtrcr; /* monitor rising temperature rate capture register */
+ u32 tmftrcr; /* monitor falling temperature rate capture register */
+ u32 tmhtitr; /* High Temperature Immediate Threshold */
+ u32 tmhtatr; /* High Temperature Average Threshold */
+ u32 tmhtactr; /* High Temperature Average Crit Threshold */
+ u32 res3;
+ u32 tmltitr; /* monitor low temperature immediate threshold */
+ u32 tmltatr; /* monitor low temperature average threshold register */
+ u32 tmltactr; /* monitor low temperature average critical threshold */
+ u32 res4;
+ u32 tmrtrctr; /* monitor rising temperature rate critical threshold */
+ u32 tmftrctr; /* monitor falling temperature rate critical threshold */
+ u8 res5[0x8];
+ u32 ttcfgr; /* Temperature Configuration Register */
+ u32 tscfgr; /* Sensor Configuration Register */
+ u8 res6[0x78];
+ struct qoriq_tmu_site_regs site[SITES_MAX];
+ u8 res7[0x9f8];
+ u32 ipbrr0; /* IP Block Revision Register 0 */
+ u32 ipbrr1; /* IP Block Revision Register 1 */
+ u8 res8[0x300];
+ u32 teumr0;
+ u32 teumr1;
+ u32 teumr2;
+ u32 res9;
+ u32 ttrcr[4]; /* Temperature Range Control Register */
};
struct qoriq_tmu_data;
@@ -72,7 +116,9 @@ struct qoriq_sensor {
};
struct qoriq_tmu_data {
- struct qoriq_tmu_regs __iomem *regs;
+ int ver;
+ struct qoriq_tmu_regs_v1 __iomem *regs;
+ struct qoriq_tmu_regs_v2 __iomem *regs_v2;
struct clk *clk;
bool little_endian;
struct qoriq_sensor *sensor[SITES_MAX];
@@ -132,12 +178,23 @@ static int qoriq_tmu_register_tmu_zone(struct platform_device *pdev)
return PTR_ERR(qdata->sensor[id]->tzd);
}
- sites |= 0x1 << (15 - id);
+ if (qdata->ver == TMU_VER1)
+ sites |= 0x1 << (15 - id);
+ else
+ sites |= 0x1 << id;
}
/* Enable monitoring */
- if (sites != 0)
- tmu_write(qdata, sites | TMR_ME | TMR_ALPF, &qdata->regs->tmr);
+ if (sites != 0) {
+ if (qdata->ver == TMU_VER1) {
+ tmu_write(qdata, sites | TMR_ME | TMR_ALPF,
+ &qdata->regs->tmr);
+ } else {
+ tmu_write(qdata, sites, &qdata->regs_v2->tmsr);
+ tmu_write(qdata, TMR_ME | TMR_ALPF_V2,
+ &qdata->regs_v2->tmr);
+ }
+ }
return 0;
}
@@ -150,16 +207,21 @@ static int qoriq_tmu_calibration(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
- if (of_property_read_u32_array(np, "fsl,tmu-range", range, 4)) {
- dev_err(&pdev->dev, "missing calibration range.\n");
- return -ENODEV;
+ len = of_property_count_u32_elems(np, "fsl,tmu-range");
+ if (len < 0 || len > 4) {
+ dev_err(&pdev->dev, "invalid range data.\n");
+ return len < 0 ? len : -EINVAL;
+ }
+
+ val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
+ if (val != 0) {
+ dev_err(&pdev->dev, "failed to read range data.\n");
+ return val;
}
/* Init temperature range registers */
- tmu_write(data, range[0], &data->regs->ttr0cr);
- tmu_write(data, range[1], &data->regs->ttr1cr);
- tmu_write(data, range[2], &data->regs->ttr2cr);
- tmu_write(data, range[3], &data->regs->ttr3cr);
+ for (i = 0; i < len; i++)
+ tmu_write(data, range[i], &data->regs->ttrcr[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
@@ -183,7 +245,12 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
tmu_write(data, TIER_DISABLE, &data->regs->tier);
/* Set update_interval */
- tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+ if (data->ver == TMU_VER1) {
+ tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+ } else {
+ tmu_write(data, TMTMIR_DEFAULT, &data->regs_v2->tmtmir);
+ tmu_write(data, TEUMR0_V2, &data->regs_v2->teumr0);
+ }
/* Disable monitoring */
tmu_write(data, TMR_DISABLE, &data->regs->tmr);
@@ -192,6 +259,7 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
static int qoriq_tmu_probe(struct platform_device *pdev)
{
int ret;
+ u32 ver;
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
@@ -220,6 +288,12 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
return ret;
}
+ /* the version register is at offset 0xbf8 on both v1 and v2 */
+ ver = tmu_read(data, &data->regs->ipbrr0);
+ data->ver = (ver >> 8) & 0xff;
+ if (data->ver == TMU_VER2)
+ data->regs_v2 = (void __iomem *)data->regs;
+
qoriq_tmu_init_device(data); /* TMU initialization */
ret = qoriq_tmu_calibration(pdev); /* TMU calibration */
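A hedged restatement of the site-enable difference handled in
qoriq_tmu_register_tmu_zone() above (the helper shape is illustrative):

/* TMU v1 numbers monitored sites from the MSB side, v2 from the LSB */
static u32 example_site_bit(int ver, int id)
{
	return (ver == TMU_VER1) ? BIT(15 - id) : BIT(id);
}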
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 755d2b5bd2c2..1460cf9d9f1c 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -315,6 +315,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.data = &rcar_gen3_ths_tj_1_m3_w,
},
{
+ .compatible = "renesas,r8a774b1-thermal",
+ .data = &rcar_gen3_ths_tj_1,
+ },
+ {
.compatible = "renesas,r8a7795-thermal",
.data = &rcar_gen3_ths_tj_1,
},
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index dcecf2e8dc8e..ae5743c9a894 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -134,7 +134,8 @@ static int gadc_thermal_probe(struct platform_device *pdev)
gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
if (IS_ERR(gti->channel)) {
ret = PTR_ERR(gti->channel);
- dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
return ret;
}
@@ -142,8 +143,10 @@ static int gadc_thermal_probe(struct platform_device *pdev)
&gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
ret = PTR_ERR(gti->tz_dev);
- dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Thermal zone sensor register failed: %d\n",
+ ret);
return ret;
}
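The pattern applied twice above, stated once as a hedged sketch (later
kernels wrap it in dev_err_probe(), which postdates this change):

if (ret != -EPROBE_DEFER)	/* deferral is retried, so stay quiet */
	dev_err(dev, "operation failed: %d\n", ret);
return ret;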
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4481cc8958f..9a321dc548c8 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -19,8 +19,6 @@
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/of.h>
-#include <net/netlink.h>
-#include <net/genetlink.h>
#include <linux/suspend.h>
#define CREATE_TRACE_POINTS
@@ -304,7 +302,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
&tz->poll_queue,
msecs_to_jiffies(delay));
else
- cancel_delayed_work_sync(&tz->poll_queue);
+ cancel_delayed_work(&tz->poll_queue);
}
static void monitor_thermal_zone(struct thermal_zone_device *tz)
@@ -1414,7 +1412,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
mutex_unlock(&thermal_list_lock);
- thermal_zone_device_set_polling(tz, 0);
+ cancel_delayed_work_sync(&tz->poll_queue);
thermal_set_governor(tz, NULL);
@@ -1464,97 +1462,6 @@ exit:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
-#ifdef CONFIG_NET
-static const struct genl_multicast_group thermal_event_mcgrps[] = {
- { .name = THERMAL_GENL_MCAST_GROUP_NAME, },
-};
-
-static struct genl_family thermal_event_genl_family __ro_after_init = {
- .module = THIS_MODULE,
- .name = THERMAL_GENL_FAMILY_NAME,
- .version = THERMAL_GENL_VERSION,
- .maxattr = THERMAL_GENL_ATTR_MAX,
- .mcgrps = thermal_event_mcgrps,
- .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
-};
-
-int thermal_generate_netlink_event(struct thermal_zone_device *tz,
- enum events event)
-{
- struct sk_buff *skb;
- struct nlattr *attr;
- struct thermal_genl_event *thermal_event;
- void *msg_header;
- int size;
- int result;
- static unsigned int thermal_event_seqnum;
-
- if (!tz)
- return -EINVAL;
-
- /* allocate memory */
- size = nla_total_size(sizeof(struct thermal_genl_event)) +
- nla_total_size(0);
-
- skb = genlmsg_new(size, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- /* add the genetlink message header */
- msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
- &thermal_event_genl_family, 0,
- THERMAL_GENL_CMD_EVENT);
- if (!msg_header) {
- nlmsg_free(skb);
- return -ENOMEM;
- }
-
- /* fill the data */
- attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
- sizeof(struct thermal_genl_event));
-
- if (!attr) {
- nlmsg_free(skb);
- return -EINVAL;
- }
-
- thermal_event = nla_data(attr);
- if (!thermal_event) {
- nlmsg_free(skb);
- return -EINVAL;
- }
-
- memset(thermal_event, 0, sizeof(struct thermal_genl_event));
-
- thermal_event->orig = tz->id;
- thermal_event->event = event;
-
- /* send multicast genetlink message */
- genlmsg_end(skb, msg_header);
-
- result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
- 0, GFP_ATOMIC);
- if (result)
- dev_err(&tz->device, "Failed to send netlink event:%d", result);
-
- return result;
-}
-EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
-
-static int __init genetlink_init(void)
-{
- return genl_register_family(&thermal_event_genl_family);
-}
-
-static void genetlink_exit(void)
-{
- genl_unregister_family(&thermal_event_genl_family);
-}
-#else /* !CONFIG_NET */
-static inline int genetlink_init(void) { return 0; }
-static inline void genetlink_exit(void) {}
-#endif /* !CONFIG_NET */
-
static int thermal_pm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
@@ -1607,13 +1514,9 @@ static int __init thermal_init(void)
if (result)
goto unregister_governors;
- result = genetlink_init();
- if (result)
- goto unregister_class;
-
result = of_parse_thermal_zones();
if (result)
- goto exit_netlink;
+ goto unregister_class;
result = register_pm_notifier(&thermal_pm_nb);
if (result)
@@ -1622,8 +1525,6 @@ static int __init thermal_init(void)
return 0;
-exit_netlink:
- genetlink_exit();
unregister_class:
class_unregister(&thermal_class);
unregister_governors:
@@ -1636,4 +1537,4 @@ error:
mutex_destroy(&poweroff_lock);
return result;
}
-fs_initcall(thermal_init);
+core_initcall(thermal_init);
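The two cancel changes above are complementary: thermal_zone_device_set_polling() can run from the poll work item itself, where a synchronous cancel would wait on its own completion, while thermal_zone_device_unregister() runs once in process context and is the right place to guarantee the handler has finished. A minimal sketch of the split, assuming a hypothetical foo_zone wrapper around the same delayed-work pattern:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_zone {
	struct delayed_work poll_work;
};

/* Polling path: may be invoked from poll_work's own handler, so the
 * non-blocking cancel is mandatory to avoid self-deadlock. */
static void foo_set_polling(struct foo_zone *z, int delay_ms)
{
	if (delay_ms)
		mod_delayed_work(system_freezable_wq, &z->poll_work,
				 msecs_to_jiffies(delay_ms));
	else
		cancel_delayed_work(&z->poll_work);
}

/* Teardown path: runs exactly once, never from the work item, so the
 * blocking cancel is safe and ensures no handler outlives the zone. */
static void foo_teardown(struct foo_zone *z)
{
	cancel_delayed_work_sync(&z->poll_work);
}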
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index 40524fa13533..d0bdf1ea3331 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -110,7 +110,6 @@ static struct platform_driver thermal_mmio_driver = {
.probe = thermal_mmio_probe,
.driver = {
.name = "thermal-mmio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(thermal_mmio_id_table),
},
};
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index c7623f99ac0f..a312cb33a99b 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -82,20 +82,20 @@ config HW_CONSOLE
default y
config VT_HW_CONSOLE_BINDING
- bool "Support for binding and unbinding console drivers"
- depends on HW_CONSOLE
- ---help---
- The virtual terminal is the device that interacts with the physical
- terminal through console drivers. On these systems, at least one
- console driver is loaded. In other configurations, additional console
- drivers may be enabled, such as the framebuffer console. If more than
- 1 console driver is enabled, setting this to 'y' will allow you to
- select the console driver that will serve as the backend for the
- virtual terminals.
-
- See <file:Documentation/driver-api/console.rst> for more
- information. For framebuffer console users, please refer to
- <file:Documentation/fb/fbcon.rst>.
+ bool "Support for binding and unbinding console drivers"
+ depends on HW_CONSOLE
+ ---help---
+ The virtual terminal is the device that interacts with the physical
+ terminal through console drivers. On these systems, at least one
+ console driver is loaded. In other configurations, additional console
+ drivers may be enabled, such as the framebuffer console. If more than
+ 1 console driver is enabled, setting this to 'y' will allow you to
+ select the console driver that will serve as the backend for the
+ virtual terminals.
+
+ See <file:Documentation/driver-api/console.rst> for more
+ information. For framebuffer console users, please refer to
+ <file:Documentation/fb/fbcon.rst>.
config UNIX98_PTYS
bool "Unix98 PTY support" if EXPERT
@@ -173,15 +173,15 @@ config ROCKETPORT
depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
help
This driver supports Comtrol RocketPort and RocketModem PCI boards.
- These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
- modems. For information about the RocketPort/RocketModem boards
- and this driver read <file:Documentation/driver-api/serial/rocket.rst>.
+ These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
+ modems. For information about the RocketPort/RocketModem boards
+ and this driver read <file:Documentation/driver-api/serial/rocket.rst>.
To compile this driver as a module, choose M here: the
module will be called rocket.
If you want to compile this driver into the kernel, say Y here. If
- you don't have a Comtrol RocketPort/RocketModem card installed, say N.
+ you don't have a Comtrol RocketPort/RocketModem card installed, say N.
config CYCLADES
tristate "Cyclades async mux support"
@@ -437,8 +437,8 @@ config MIPS_EJTAG_FDC_KGDB
depends on MIPS_EJTAG_FDC_TTY && KGDB
default y
help
- This enables the use of KGDB over an FDC channel, allowing KGDB to be
- used remotely or when a serial port isn't available.
+ This enables the use of KGDB over an FDC channel, allowing KGDB to be
+ used remotely or when a serial port isn't available.
config MIPS_EJTAG_FDC_KGDB_CHAN
int "KGDB FDC channel"
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 8330fd809a05..13f63c01c589 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -22,18 +22,8 @@
*
*/
-/*
- * Serial driver configuration section. Here are the various options:
- *
- * SERIAL_PARANOIA_CHECK
- * Check the magic number for the async_structure where
- * ever possible.
- */
-
#include <linux/delay.h>
-#undef SERIAL_PARANOIA_CHECK
-
/* Set of debugging defines */
#undef SERIAL_DEBUG_INTR
@@ -132,28 +122,6 @@ static struct serial_state rs_table[1];
#define serial_isroot() (capable(CAP_SYS_ADMIN))
-
-static inline int serial_paranoia_check(struct serial_state *info,
- char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- static const char *badmagic =
- "Warning: bad magic number for serial struct (%s) in %s\n";
- static const char *badinfo =
- "Warning: null async_struct for (%s) in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (info->magic != SERIAL_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
/* some serial hardware definitions */
#define SDR_OVRUN (1<<15)
#define SDR_RBF (1<<14)
@@ -189,9 +157,6 @@ static void rs_stop(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_stop"))
- return;
-
local_irq_save(flags);
if (info->IER & UART_IER_THRI) {
info->IER &= ~UART_IER_THRI;
@@ -209,9 +174,6 @@ static void rs_start(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_start"))
- return;
-
local_irq_save(flags);
if (info->xmit.head != info->xmit.tail
&& info->xmit.buf
@@ -783,9 +745,6 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_put_char"))
- return 0;
-
if (!info->xmit.buf)
return 0;
@@ -808,9 +767,6 @@ static void rs_flush_chars(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
- return;
-
if (info->xmit.head == info->xmit.tail
|| tty->stopped
|| tty->hw_stopped
@@ -833,9 +789,6 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_write"))
- return 0;
-
if (!info->xmit.buf)
return 0;
@@ -878,8 +831,6 @@ static int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_write_room"))
- return 0;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
@@ -887,8 +838,6 @@ static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
- return 0;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
@@ -897,8 +846,6 @@ static void rs_flush_buffer(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
- return;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
@@ -914,9 +861,6 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_send_xchar"))
- return;
-
info->x_char = ch;
if (ch) {
/* Make sure transmit interrupts are on */
@@ -952,9 +896,6 @@ static void rs_throttle(struct tty_struct * tty)
printk("throttle %s ....\n", tty_name(tty));
#endif
- if (serial_paranoia_check(info, tty->name, "rs_throttle"))
- return;
-
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
@@ -974,9 +915,6 @@ static void rs_unthrottle(struct tty_struct * tty)
printk("unthrottle %s ....\n", tty_name(tty));
#endif
- if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
- return;
-
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
@@ -1109,8 +1047,6 @@ static int rs_tiocmget(struct tty_struct *tty)
unsigned char control, status;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
if (tty_io_error(tty))
return -EIO;
@@ -1131,8 +1067,6 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
if (tty_io_error(tty))
return -EIO;
@@ -1155,12 +1089,8 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
*/
static int rs_break(struct tty_struct *tty, int break_state)
{
- struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_break"))
- return -EINVAL;
-
local_irq_save(flags);
if (break_state == -1)
custom.adkcon = AC_SETCLR | AC_UARTBRK;
@@ -1212,9 +1142,6 @@ static int rs_ioctl(struct tty_struct *tty,
DEFINE_WAIT(wait);
int ret;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
-
if ((cmd != TIOCSERCONFIG) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty_io_error(tty))
@@ -1333,9 +1260,6 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
- if (serial_paranoia_check(state, tty->name, "rs_close"))
- return;
-
if (tty_port_close_start(port, tty, filp) == 0)
return;
@@ -1379,9 +1303,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
unsigned long orig_jiffies, char_time;
int lsr;
- if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
- return;
-
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
@@ -1440,9 +1361,6 @@ static void rs_hangup(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_hangup"))
- return;
-
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
@@ -1467,8 +1385,6 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
port->tty = tty;
tty->driver_data = info;
tty->port = port;
- if (serial_paranoia_check(info, tty->name, "rs_open"))
- return -ENODEV;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4487a6b9acc8..6a3c97d345a0 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -70,22 +70,22 @@ config HVC_XEN_FRONTEND
Xen driver for secondary virtual consoles
config HVC_UDBG
- bool "udbg based fake hypervisor console"
- depends on PPC
- select HVC_DRIVER
- help
- This is meant to be used during HW bring up or debugging when
- no other console mechanism exist but udbg, to get you a quick
- console for userspace. Do NOT enable in production kernels.
+ bool "udbg based fake hypervisor console"
+ depends on PPC
+ select HVC_DRIVER
+ help
+ This is meant to be used during HW bring up or debugging when
+ no other console mechanism exists but udbg, to get you a quick
+ console for userspace. Do NOT enable in production kernels.
config HVC_DCC
- bool "ARM JTAG DCC console"
- depends on ARM || ARM64
- select HVC_DRIVER
- help
- This console uses the JTAG DCC on ARM to create a console under the HVC
- driver. This console is used through a JTAG only on ARM. If you don't have
- a JTAG then you probably don't want this option.
+ bool "ARM JTAG DCC console"
+ depends on ARM || ARM64
+ select HVC_DRIVER
+ help
+ This console uses the JTAG DCC on ARM to create a console under the HVC
+ driver. This console is used through a JTAG only on ARM. If you don't have
+ a JTAG then you probably don't want this option.
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 02629a1f193d..8e0edb7d93fd 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
+#include <linux/console.h>
#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <asm/dcc.h>
#include <asm/processor.h>
@@ -12,6 +15,31 @@
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+static void dcc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (__dcc_getstatus() & DCC_STATUS_TX)
+ cpu_relax();
+
+ __dcc_putchar(ch);
+}
+
+static void dcc_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, dcc_uart_console_putchar);
+}
+
+static int __init dcc_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = dcc_early_write;
+
+ return 0;
+}
+
+EARLYCON_DECLARE(dcc, dcc_early_console_setup);
+
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
int i;
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 5ba6816ebf81..fbaa4ec85560 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1222,22 +1222,28 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
*/
static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
{
- struct rocket_ports tmp;
- int board;
+ struct rocket_ports *tmp;
+ int board, ret = 0;
- memset(&tmp, 0, sizeof (tmp));
- tmp.tty_major = rocket_driver->major;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->tty_major = rocket_driver->major;
for (board = 0; board < 4; board++) {
- tmp.rocketModel[board].model = rocketModel[board].model;
- strcpy(tmp.rocketModel[board].modelString, rocketModel[board].modelString);
- tmp.rocketModel[board].numPorts = rocketModel[board].numPorts;
- tmp.rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
- tmp.rocketModel[board].startingPortNumber = rocketModel[board].startingPortNumber;
- }
- if (copy_to_user(retports, &tmp, sizeof (*retports)))
- return -EFAULT;
- return 0;
+ tmp->rocketModel[board].model = rocketModel[board].model;
+ strcpy(tmp->rocketModel[board].modelString,
+ rocketModel[board].modelString);
+ tmp->rocketModel[board].numPorts = rocketModel[board].numPorts;
+ tmp->rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
+ tmp->rocketModel[board].startingPortNumber =
+ rocketModel[board].startingPortNumber;
+ }
+ if (copy_to_user(retports, tmp, sizeof(*retports)))
+ ret = -EFAULT;
+ kfree(tmp);
+ return ret;
}
static int reset_rm2(struct r_port *info, void __user *arg)
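struct rocket_ports is far too large for the kernel stack, so the rewrite above builds it in heap memory before copying it out. A condensed sketch of the pattern with a hypothetical oversized structure:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct big_report {
	char payload[4096];	/* too big to live on the kernel stack */
};

static int fill_big_report(struct big_report __user *dst)
{
	struct big_report *tmp;
	int ret = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* ... populate *tmp ... */

	if (copy_to_user(dst, tmp, sizeof(*tmp)))
		ret = -EFAULT;

	kfree(tmp);
	return ret;
}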
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index a0ac16ee6575..226adeec2aed 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -552,16 +552,97 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
}
#ifdef CONFIG_ACPI
+
+#define SERDEV_ACPI_MAX_SCAN_DEPTH 32
+
+struct acpi_serdev_lookup {
+ acpi_handle device_handle;
+ acpi_handle controller_handle;
+ int n;
+ int index;
+};
+
+static int acpi_serdev_parse_resource(struct acpi_resource *ares, void *data)
+{
+ struct acpi_serdev_lookup *lookup = data;
+ struct acpi_resource_uart_serialbus *sb;
+ acpi_status status;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+ return 1;
+
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ sb = &ares->data.uart_serial_bus;
+
+ status = acpi_get_handle(lookup->device_handle,
+ sb->resource_source.string_ptr,
+ &lookup->controller_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ /*
+ * NOTE: Ideally, we would also want to retrieve other properties here,
+ * once setting them before opening the device is supported by serdev.
+ */
+
+ return 1;
+}
+
+static int acpi_serdev_do_lookup(struct acpi_device *adev,
+ struct acpi_serdev_lookup *lookup)
+{
+ struct list_head resource_list;
+ int ret;
+
+ lookup->device_handle = acpi_device_handle(adev);
+ lookup->controller_handle = NULL;
+ lookup->n = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_serdev_parse_resource, lookup);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
+ struct acpi_device *adev)
+{
+ struct acpi_serdev_lookup lookup;
+ int ret;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return -EINVAL;
+
+ /* Look for UARTSerialBusV2 resource */
+ lookup.index = -1; // we only care for the last device
+
+ ret = acpi_serdev_do_lookup(adev, &lookup);
+ if (ret)
+ return ret;
+
+ /* Make sure controller and ResourceSource handle match */
+ if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
+ return -ENODEV;
+
+ return 0;
+}
+
static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
- struct acpi_device *adev)
+ struct acpi_device *adev)
{
- struct serdev_device *serdev = NULL;
+ struct serdev_device *serdev;
int err;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
- return AE_OK;
-
serdev = serdev_device_alloc(ctrl);
if (!serdev) {
dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
@@ -583,7 +664,7 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
}
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+ void *data, void **return_value)
{
struct serdev_controller *ctrl = data;
struct acpi_device *adev;
@@ -591,22 +672,28 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
+ if (acpi_device_enumerated(adev))
+ return AE_OK;
+
+ if (acpi_serdev_check_resources(ctrl, adev))
+ return AE_OK;
+
return acpi_serdev_register_device(ctrl, adev);
}
+
static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
{
acpi_status status;
- acpi_handle handle;
- handle = ACPI_HANDLE(ctrl->dev.parent);
- if (!handle)
+ if (!has_acpi_companion(ctrl->dev.parent))
return -ENODEV;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SERDEV_ACPI_MAX_SCAN_DEPTH,
acpi_serdev_add_device, NULL, ctrl, NULL);
if (ACPI_FAILURE(status))
- dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
+ dev_warn(&ctrl->dev, "failed to enumerate serdev slaves\n");
if (!ctrl->serdev)
return -ENODEV;
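The enumeration above switches from walking only the controller's children to walking the whole namespace (bounded by SERDEV_ACPI_MAX_SCAN_DEPTH) and matching each candidate's UARTSerialBusV2 ResourceSource back to the controller. A condensed sketch of the resource-matching callback, with illustrative names:

#include <linux/acpi.h>

struct uart_lookup {
	acpi_handle device_handle;
	acpi_handle controller_handle;
};

/* Walks one device's _CRS entries; remembers the controller handle
 * named by the UART serial-bus resource and skips everything else.
 * Returning 1 keeps acpi_dev_get_resources() iterating. */
static int match_uart_resource(struct acpi_resource *ares, void *data)
{
	struct uart_lookup *lookup = data;
	struct acpi_resource_uart_serialbus *sb;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;
	if (ares->data.common_serial_bus.type !=
	    ACPI_RESOURCE_SERIAL_TYPE_UART)
		return 1;

	sb = &ares->data.uart_serial_bus;
	if (ACPI_FAILURE(acpi_get_handle(lookup->device_handle,
					 sb->resource_source.string_ptr,
					 &lookup->controller_handle)))
		lookup->controller_handle = NULL;

	return 1;
}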
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 0438d9a905ce..6e67fd89445a 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -14,6 +14,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/clk.h>
@@ -22,6 +24,7 @@
#define ASPEED_VUART_GCRA 0x20
#define ASPEED_VUART_GCRA_VUART_EN BIT(0)
+#define ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY BIT(1)
#define ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5)
#define ASPEED_VUART_GCRB 0x24
#define ASPEED_VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4)
@@ -131,8 +134,53 @@ static ssize_t sirq_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(sirq);
+static ssize_t sirq_polarity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ reg &= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n", reg ? 1 : 0);
+}
+
+static void aspeed_vuart_set_sirq_polarity(struct aspeed_vuart *vuart,
+ bool polarity)
+{
+ u8 reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+
+ if (polarity)
+ reg |= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+ else
+ reg &= ~ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+
+ writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+}
+
+static ssize_t sirq_polarity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ aspeed_vuart_set_sirq_polarity(vuart, val != 0);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sirq_polarity);
+
static struct attribute *aspeed_vuart_attrs[] = {
&dev_attr_sirq.attr,
+ &dev_attr_sirq_polarity.attr,
&dev_attr_lpc_address.attr,
NULL,
};
@@ -302,8 +350,30 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
return 1;
}
+static void aspeed_vuart_auto_configure_sirq_polarity(
+ struct aspeed_vuart *vuart, struct device_node *syscon_np,
+ u32 reg_offset, u32 reg_mask)
+{
+ struct regmap *regmap;
+ u32 value;
+
+ regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(regmap)) {
+ dev_warn(vuart->dev,
+ "could not get regmap for aspeed,sirq-polarity-sense\n");
+ return;
+ }
+ if (regmap_read(regmap, reg_offset, &value)) {
+ dev_warn(vuart->dev, "could not read hw strap table\n");
+ return;
+ }
+
+ aspeed_vuart_set_sirq_polarity(vuart, (value & reg_mask) == 0);
+}
+
static int aspeed_vuart_probe(struct platform_device *pdev)
{
+ struct of_phandle_args sirq_polarity_sense_args;
struct uart_8250_port port;
struct aspeed_vuart *vuart;
struct device_node *np;
@@ -402,6 +472,20 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
vuart->line = rc;
+ rc = of_parse_phandle_with_fixed_args(
+ np, "aspeed,sirq-polarity-sense", 2, 0,
+ &sirq_polarity_sense_args);
+ if (rc < 0) {
+ dev_dbg(&pdev->dev,
+ "aspeed,sirq-polarity-sense property not found\n");
+ } else {
+ aspeed_vuart_auto_configure_sirq_polarity(
+ vuart, sirq_polarity_sense_args.np,
+ sirq_polarity_sense_args.args[0],
+ BIT(sirq_polarity_sense_args.args[1]));
+ of_node_put(sirq_polarity_sense_args.np);
+ }
+
aspeed_vuart_set_enabled(vuart, true);
aspeed_vuart_set_host_tx_discard(vuart, true);
platform_set_drvdata(pdev, vuart);
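The SIRQ auto-configuration above follows a common pattern: a device-tree phandle with two fixed argument cells (register offset, bit number) points at a syscon node, and the strap value is read back through its regmap. A sketch under illustrative names ("vendor,strap-sense" is not a real binding):

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int read_strap_bit(struct device_node *np, bool *set)
{
	struct of_phandle_args args;
	struct regmap *map;
	u32 val;
	int ret;

	/* Two fixed cells: args[0] = register offset, args[1] = bit. */
	ret = of_parse_phandle_with_fixed_args(np, "vendor,strap-sense",
					       2, 0, &args);
	if (ret < 0)
		return ret;

	map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, args.args[0], &val);
	if (ret)
		return ret;

	*set = !!(val & BIT(args.args[1]));
	return 0;
}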
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1c72fdc2dd37..aab3cccc6789 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -280,9 +280,6 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
- if (IS_ERR(d->clk))
- goto out;
-
clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, baud * 16);
if (rate < 0)
@@ -293,8 +290,10 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, rate);
clk_prepare_enable(d->clk);
- if (!ret)
- p->uartclk = rate;
+ if (ret)
+ goto out;
+
+ p->uartclk = rate;
out:
p->status &= ~UPSTAT_AUTOCTS;
@@ -386,10 +385,10 @@ static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {}, *up = &uart;
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int irq = platform_get_irq(pdev, 0);
struct uart_port *p = &up->port;
struct device *dev = &pdev->dev;
struct dw8250_data *data;
+ int irq;
int err;
u32 val;
@@ -398,11 +397,9 @@ static int dw8250_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "cannot get irq\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return irq;
- }
spin_lock_init(&p->lock);
p->mapbase = regs->start;
@@ -472,19 +469,18 @@ static int dw8250_probe(struct platform_device *pdev)
device_property_read_u32(dev, "clock-frequency", &p->uartclk);
/* If there is separate baudclk, get the rate from it. */
- data->clk = devm_clk_get(dev, "baudclk");
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER)
- data->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR_OR_NULL(data->clk)) {
- err = clk_prepare_enable(data->clk);
- if (err)
- dev_warn(dev, "could not enable optional baudclk: %d\n",
- err);
- else
- p->uartclk = clk_get_rate(data->clk);
- }
+ data->clk = devm_clk_get_optional(dev, "baudclk");
+ if (data->clk == NULL)
+ data->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ err = clk_prepare_enable(data->clk);
+ if (err)
+ dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+
+ if (data->clk)
+ p->uartclk = clk_get_rate(data->clk);
/* If no clock rate is defined, fail. */
if (!p->uartclk) {
@@ -493,17 +489,16 @@ static int dw8250_probe(struct platform_device *pdev)
goto err_clk;
}
- data->pclk = devm_clk_get(dev, "apb_pclk");
- if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
+ data->pclk = devm_clk_get_optional(dev, "apb_pclk");
+ if (IS_ERR(data->pclk)) {
+ err = PTR_ERR(data->pclk);
goto err_clk;
}
- if (!IS_ERR(data->pclk)) {
- err = clk_prepare_enable(data->pclk);
- if (err) {
- dev_err(dev, "could not enable apb_pclk\n");
- goto err_clk;
- }
+
+ err = clk_prepare_enable(data->pclk);
+ if (err) {
+ dev_err(dev, "could not enable apb_pclk\n");
+ goto err_clk;
}
data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
@@ -546,12 +541,10 @@ err_reset:
reset_control_assert(data->rst);
err_pclk:
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
err_clk:
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
return err;
}
@@ -567,11 +560,9 @@ static int dw8250_remove(struct platform_device *pdev)
reset_control_assert(data->rst);
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
@@ -604,11 +595,9 @@ static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
return 0;
}
@@ -617,11 +606,9 @@ static int dw8250_runtime_resume(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- if (!IS_ERR(data->pclk))
- clk_prepare_enable(data->pclk);
+ clk_prepare_enable(data->pclk);
- if (!IS_ERR(data->clk))
- clk_prepare_enable(data->clk);
+ clk_prepare_enable(data->clk);
return 0;
}
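The dw8250 conversion works because devm_clk_get_optional() returns NULL, not an ERR_PTR, when the clock is simply absent, and the clk consumer API accepts a NULL clk as a no-op; every IS_ERR() guard around prepare/unprepare therefore disappears. A minimal sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *foo_get_optional_clk(struct device *dev, const char *id)
{
	struct clk *clk;

	clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(clk))	/* a real error, including -EPROBE_DEFER */
		return clk;

	/* clk may be NULL here; clk_prepare_enable(NULL) returns 0 and
	 * clk_disable_unprepare(NULL) is a no-op, so no further guards
	 * are needed. */
	if (clk_prepare_enable(clk))
		dev_warn(dev, "could not enable optional clock '%s'\n", id);

	return clk;
}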
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 597eb9d16f21..108cd55f9c4d 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -166,6 +166,23 @@ static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
serial_port_out(p, 0x2, quot_frac);
}
+static int xr17v35x_startup(struct uart_port *port)
+{
+ /*
+ * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
+ * MCR [7:5] and MSR [7:0]
+ */
+ serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
+
+ /*
+ * Make sure all interrups are masked until initialization is
+ * complete and the FIFOs are cleared
+ */
+ serial_port_out(port, UART_IER, 0);
+
+ return serial8250_do_startup(port);
+}
+
static void exar_shutdown(struct uart_port *port)
{
unsigned char lsr;
@@ -212,6 +229,8 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.get_divisor = xr17v35x_get_divisor;
port->port.set_divisor = xr17v35x_set_divisor;
+
+ port->port.startup = xr17v35x_startup;
} else {
port->port.type = PORT_XR17D15X;
}
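Moving the XR17V35x setup out of serial8250_do_startup() and into a per-driver startup hook is the standard way to keep quirks local: do the device-specific writes, then chain to the generic routine. A sketch of the chaining pattern, with foo_unlock_extended_regs() standing in for the EFR/IER writes above:

#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static void foo_unlock_extended_regs(struct uart_port *port)
{
	/* device-specific register setup would go here */
}

static int foo_startup(struct uart_port *port)
{
	foo_unlock_extended_regs(port);
	return serial8250_do_startup(port);	/* chain to the 8250 core */
}

/* wired up at probe time: port->startup = foo_startup; */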
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 5f72ef3ea574..60eff3240c8a 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -221,17 +221,6 @@ static void qrk_serial_exit_dma(struct lpss8250 *lpss) {}
static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
{
- struct pci_dev *pdev = to_pci_dev(port->dev);
- int ret;
-
- pci_set_master(pdev);
-
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0)
- return ret;
-
- port->irq = pci_irq_vector(pdev, 0);
-
qrk_serial_setup_dma(lpss, port);
return 0;
}
@@ -293,16 +282,22 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ pci_set_master(pdev);
+
lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
if (!lpss)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
lpss->board = (struct lpss8250_board *)id->driver_data;
memset(&uart, 0, sizeof(struct uart_8250_port));
uart.port.dev = &pdev->dev;
- uart.port.irq = pdev->irq;
+ uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
uart.port.iotype = UPIO_MEM;
@@ -337,6 +332,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_exit:
if (lpss->board->exit)
lpss->board->exit(lpss);
+ pci_free_irq_vectors(pdev);
return ret;
}
@@ -348,6 +344,7 @@ static void lpss8250_remove(struct pci_dev *pdev)
if (lpss->board->exit)
lpss->board->exit(lpss);
+ pci_free_irq_vectors(pdev);
}
static const struct lpss8250_board byt_board = {
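The lpss8250 rework pairs pci_alloc_irq_vectors() with pci_free_irq_vectors() on both the error path and remove, and derives the IRQ from pci_irq_vector() rather than the legacy pdev->irq. A sketch of the balanced lifecycle; foo_register_port() is hypothetical:

#include <linux/pci.h>

static int foo_register_port(struct pci_dev *pdev, int irq); /* hypothetical */

static int foo_pci_probe(struct pci_dev *pdev)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	/* exactly one vector, of whatever type the platform offers */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	ret = foo_register_port(pdev, pci_irq_vector(pdev, 0));
	if (ret)
		pci_free_irq_vectors(pdev);	/* mirror the allocation */
	return ret;
}

static void foo_pci_remove(struct pci_dev *pdev)
{
	/* ... unregister the port ... */
	pci_free_irq_vectors(pdev);
}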
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b411ba4eb5e9..4d067f515f74 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -544,7 +544,7 @@ static int mtk8250_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- data->rx_wakeup_irq = platform_get_irq(pdev, 1);
+ data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 0826cfdbd406..92fbf46ce3bd 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -48,6 +48,36 @@ static inline void tegra_serial_handle_break(struct uart_port *port)
}
#endif
+static int of_8250_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
+ port->rs485 = *rs485;
+
+ /*
+ * Both serial8250_em485_init and serial8250_em485_destroy
+ * are idempotent
+ */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ int ret = serial8250_em485_init(up);
+
+ if (ret) {
+ rs485->flags &= ~SER_RS485_ENABLED;
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
+ return ret;
+ }
+
+ serial8250_em485_destroy(up);
+
+ return 0;
+}
+
/*
* Fill a struct uart_port for a given device node
*/
@@ -178,6 +208,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->flags |= UPF_SKIP_TEST;
port->dev = &ofdev->dev;
+ port->rs485_config = of_8250_rs485_config;
switch (type) {
case PORT_TEGRA:
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6adbadd6a56a..022924d5ad54 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -48,8 +48,6 @@ struct f815xxa_data {
int idx;
};
-#define PCI_NUM_BAR_RESOURCES 6
-
struct serial_private {
struct pci_dev *dev;
unsigned int nr;
@@ -89,7 +87,7 @@ setup_port(struct serial_private *priv, struct uart_8250_port *port,
{
struct pci_dev *dev = priv->dev;
- if (bar >= PCI_NUM_BAR_RESOURCES)
+ if (bar >= PCI_STD_NUM_BARS)
return -EINVAL;
if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
@@ -745,16 +743,8 @@ static int pci_ni8430_init(struct pci_dev *dev)
}
/* UART Port Control Register */
-#define NI16550_PCR_OFFSET 0x0f
-#define NI16550_PCR_RS422 0x00
-#define NI16550_PCR_ECHO_RS485 0x01
-#define NI16550_PCR_DTR_RS485 0x02
-#define NI16550_PCR_AUTO_RS485 0x03
-#define NI16550_PCR_WIRE_MODE_MASK 0x03
-#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
-#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
-#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3)
-#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3)
+#define NI8430_PORTCON 0x0f
+#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
static int
pci_ni8430_setup(struct serial_private *priv,
@@ -776,117 +766,14 @@ pci_ni8430_setup(struct serial_private *priv,
return -ENOMEM;
/* enable the transceiver */
- writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
- p + offset + NI16550_PCR_OFFSET);
+ writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
+ p + offset + NI8430_PORTCON);
iounmap(p);
return setup_port(priv, port, bar, offset, board->reg_shift);
}
-static int pci_ni8431_config_rs485(struct uart_port *port,
- struct serial_rs485 *rs485)
-{
- u8 pcr, acr;
- struct uart_8250_port *up;
-
- up = container_of(port, struct uart_8250_port, port);
- acr = up->acr;
- pcr = port->serial_in(port, NI16550_PCR_OFFSET);
- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
-
- if (rs485->flags & SER_RS485_ENABLED) {
- /* RS-485 */
- if ((rs485->flags & SER_RS485_RX_DURING_TX) &&
- (rs485->flags & SER_RS485_RTS_ON_SEND)) {
- dev_dbg(port->dev, "Invalid 2-wire mode\n");
- return -EINVAL;
- }
-
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
- /* Echo */
- dev_vdbg(port->dev, "2-wire DTR with echo\n");
- pcr |= NI16550_PCR_ECHO_RS485;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- } else {
- /* Auto or DTR */
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
- /* Auto */
- dev_vdbg(port->dev, "2-wire Auto\n");
- pcr |= NI16550_PCR_AUTO_RS485;
- acr |= NI16550_ACR_DTR_AUTO_DTR;
- } else {
- /* DTR-controlled */
- /* No Echo */
- dev_vdbg(port->dev, "2-wire DTR no echo\n");
- pcr |= NI16550_PCR_DTR_RS485;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- }
- }
- } else {
- /* RS-422 */
- dev_vdbg(port->dev, "4-wire\n");
- pcr |= NI16550_PCR_RS422;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- }
-
- dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr);
- port->serial_out(port, NI16550_PCR_OFFSET, pcr);
-
- up->acr = acr;
- port->serial_out(port, UART_SCR, UART_ACR);
- port->serial_out(port, UART_ICR, up->acr);
-
- /* Update the cache. */
- port->rs485 = *rs485;
-
- return 0;
-}
-
-static int pci_ni8431_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *uart, int idx)
-{
- u8 pcr, acr;
- struct pci_dev *dev = priv->dev;
- void __iomem *addr;
- unsigned int bar, offset = board->first_offset;
-
- if (idx >= board->num_ports)
- return 1;
-
- bar = FL_GET_BASE(board->flags);
- offset += idx * board->uart_offset;
-
- addr = pci_ioremap_bar(dev, bar);
- if (!addr)
- return -ENOMEM;
-
- /* enable the transceiver */
- writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
- addr + NI16550_PCR_OFFSET);
-
- pcr = readb(addr + NI16550_PCR_OFFSET);
- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
-
- /* set wire mode to default RS-422 */
- pcr |= NI16550_PCR_RS422;
- acr = NI16550_ACR_DTR_MANUAL_DTR;
-
- /* write port configuration to register */
- writeb(pcr, addr + NI16550_PCR_OFFSET);
-
- /* access and write to UART acr register */
- writeb(UART_ACR, addr + UART_SCR);
- writeb(acr, addr + UART_ICR);
-
- uart->port.rs485_config = &pci_ni8431_config_rs485;
-
- iounmap(addr);
-
- return setup_port(priv, uart, bar, offset, board->reg_shift);
-}
-
static int pci_netmos_9900_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -2023,15 +1910,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
-#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2
-#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1
-#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081
-#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE
-#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3
-#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9
-#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED
-#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4
-#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
@@ -2269,87 +2147,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_ni8430_setup,
.exit = pci_ni8430_exit,
},
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8430_2328,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8430_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8430_23216,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8430_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4852,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4854,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4858,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8433_4852,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8433_4854,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8431_4858,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8431_48516,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
/* Quatech */
{
.vendor = PCI_VENDOR_ID_QUATECH,
@@ -3106,13 +2903,6 @@ enum pci_board_num_t {
pbn_ni8430_4,
pbn_ni8430_8,
pbn_ni8430_16,
- pbn_ni8430_pxie_8,
- pbn_ni8430_pxie_16,
- pbn_ni8431_2,
- pbn_ni8431_4,
- pbn_ni8431_8,
- pbn_ni8431_pxie_8,
- pbn_ni8431_pxie_16,
pbn_ADDIDATA_PCIe_1_3906250,
pbn_ADDIDATA_PCIe_2_3906250,
pbn_ADDIDATA_PCIe_4_3906250,
@@ -3765,55 +3555,6 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 0x10,
.first_offset = 0x800,
},
- [pbn_ni8430_pxie_16] = {
- .flags = FL_BASE0,
- .num_ports = 16,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8430_pxie_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_4] = {
- .flags = FL_BASE0,
- .num_ports = 4,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_2] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_pxie_16] = {
- .flags = FL_BASE0,
- .num_ports = 16,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_pxie_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
/*
* ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
*/
@@ -4060,7 +3801,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
return -ENODEV;
num_iomem = num_port = 0;
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
num_port++;
if (first_port == -1)
@@ -4088,7 +3829,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
*/
first_port = -1;
num_port = 0;
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_IO &&
pci_resource_len(dev, i) == 8 &&
(first_port == -1 || (first_port + num_port) == i)) {
@@ -5567,33 +5308,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ni8430_4 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8430_pxie_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8430_pxie_16 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_2 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_4 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_pxie_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_pxie_16 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_2 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_4 },
/*
* MOXA
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8407166610ce..90655910b0c7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2114,20 +2114,6 @@ int serial8250_do_startup(struct uart_port *port)
enable_rsa(up);
#endif
- if (port->type == PORT_XR17V35X) {
- /*
- * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
- * MCR [7:5] and MSR [7:0]
- */
- serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
-
- /*
- * Make sure all interrups are masked until initialization is
- * complete and the FIFOs are cleared
- */
- serial_port_out(port, UART_IER, 0);
- }
-
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 7ef60f8b6e2c..fab3d4f20667 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -243,6 +243,7 @@ config SERIAL_8250_ASPEED_VUART
tristate "Aspeed Virtual UART"
depends on SERIAL_8250
depends on OF
+ depends on REGMAP && MFD_SYSCON
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
@@ -334,7 +335,7 @@ config SERIAL_8250_BCM2835AUX
Features and limitations of the UART are
Registers are similar to 16650 registers,
- set bits in the control registers that are unsupported
+ set bits in the control registers that are unsupported
are ignored and read back as 0
7/8 bit operation with 1 start and 1 stop bit
8 symbols deep fifo for rx and tx
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 540142c5b7b3..99f5da3bf913 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -287,26 +287,26 @@ config SERIAL_SAMSUNG_CONSOLE
boot time.)
config SERIAL_SIRFSOC
- tristate "SiRF SoC Platform Serial port support"
- depends on ARCH_SIRF
- select SERIAL_CORE
- help
- Support for the on-chip UART on the CSR SiRFprimaII series,
- providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
- provide all of these ports, depending on how the serial port
- pins are configured).
+ tristate "SiRF SoC Platform Serial port support"
+ depends on ARCH_SIRF
+ select SERIAL_CORE
+ help
+ Support for the on-chip UART on the CSR SiRFprimaII series,
+ providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
config SERIAL_SIRFSOC_CONSOLE
- bool "Support for console on SiRF SoC serial port"
- depends on SERIAL_SIRFSOC=y
- select SERIAL_CORE_CONSOLE
- help
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttySiRFx". (Try "man bootparam" or see the documentation of
- your boot loader about how to pass options to the kernel at
- boot time.)
+ bool "Support for console on SiRF SoC serial port"
+ depends on SERIAL_SIRFSOC=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
config SERIAL_TEGRA
tristate "NVIDIA Tegra20/30 SoC serial controller"
@@ -1078,41 +1078,41 @@ config SERIAL_SCCNXP_CONSOLE
Support for console on SCCNXP serial ports.
config SERIAL_SC16IS7XX_CORE
- tristate
+ tristate
config SERIAL_SC16IS7XX
- tristate "SC16IS7xx serial support"
- select SERIAL_CORE
- depends on (SPI_MASTER && !I2C) || I2C
- help
- This selects support for SC16IS7xx serial ports.
- Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
- SC16IS760 and SC16IS762. Select supported buses using options below.
+ tristate "SC16IS7xx serial support"
+ select SERIAL_CORE
+ depends on (SPI_MASTER && !I2C) || I2C
+ help
+ This selects support for SC16IS7xx serial ports.
+ Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
+ SC16IS760 and SC16IS762. Select supported buses using options below.
config SERIAL_SC16IS7XX_I2C
- bool "SC16IS7xx for I2C interface"
- depends on SERIAL_SC16IS7XX
- depends on I2C
- select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
- select REGMAP_I2C if I2C
- default y
- help
- Enable SC16IS7xx driver on I2C bus,
- If required say y, and say n to i2c if not required,
- Enabled by default to support oldconfig.
- You must select at least one bus for the driver to be built.
+ bool "SC16IS7xx for I2C interface"
+ depends on SERIAL_SC16IS7XX
+ depends on I2C
+ select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
+ select REGMAP_I2C if I2C
+ default y
+ help
+ Enable SC16IS7xx driver on I2C bus,
+ If required say y, and say n to i2c if not required,
+ Enabled by default to support oldconfig.
+ You must select at least one bus for the driver to be built.
config SERIAL_SC16IS7XX_SPI
- bool "SC16IS7xx for spi interface"
- depends on SERIAL_SC16IS7XX
- depends on SPI_MASTER
- select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
- select REGMAP_SPI if SPI_MASTER
- help
- Enable SC16IS7xx driver on SPI bus,
- If required say y, and say n to spi if not required,
- This is additional support to exsisting driver.
- You must select at least one bus for the driver to be built.
+ bool "SC16IS7xx for spi interface"
+ depends on SERIAL_SC16IS7XX
+ depends on SPI_MASTER
+ select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Enable SC16IS7xx driver on SPI bus,
+ If required say y, and say n to spi if not required,
+ This is additional support to the existing driver.
+ You must select at least one bus for the driver to be built.
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
@@ -1212,7 +1212,7 @@ config SERIAL_ALTERA_UART_CONSOLE
Enable a Altera UART port to be the system console.
config SERIAL_IFX6X60
- tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
+ tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
depends on GPIOLIB || COMPILE_TEST
depends on SPI && HAS_DMA
help
@@ -1392,19 +1392,19 @@ config SERIAL_FSL_LPUART_CONSOLE
you can make it the console by answering Y to this option.
config SERIAL_FSL_LINFLEXUART
- tristate "Freescale linflexuart serial port support"
+ tristate "Freescale LINFlexD UART serial port support"
depends on PRINTK
select SERIAL_CORE
help
- Support for the on-chip linflexuart on some Freescale SOCs.
+ Support for the on-chip LINFlexD UART on some Freescale SOCs.
config SERIAL_FSL_LINFLEXUART_CONSOLE
- bool "Console on Freescale linflexuart serial port"
+ bool "Console on Freescale LINFlexD UART serial port"
depends on SERIAL_FSL_LINFLEXUART=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
- If you have enabled the linflexuart serial port on the Freescale
+ If you have enabled the LINFlexD UART serial port on the Freescale
SoCs, you can make it the console by answering Y to this option.
config SERIAL_CONEXANT_DIGICOLOR
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 863f47056539..d056ee6cca33 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
-obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
+obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3a7d1a66f79c..4b28134d596a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -414,7 +414,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_cap_mask_t mask;
uap->dma_probed = true;
- chan = dma_request_slave_channel_reason(dev, "tx");
+ chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
if (PTR_ERR(chan) == -EPROBE_DEFER) {
uap->dma_probed = false;
@@ -813,10 +813,8 @@ __acquires(&uap->port.lock)
if (!uap->using_tx_dma)
return;
- /* Avoid deadlock with the DMA engine callback */
- spin_unlock(&uap->port.lock);
- dmaengine_terminate_all(uap->dmatx.chan);
- spin_lock(&uap->port.lock);
+ dmaengine_terminate_async(uap->dmatx.chan);
+
if (uap->dmatx.queued) {
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
DMA_TO_DEVICE);
@@ -1236,10 +1234,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct uart_amba_port *uap)
-{
-}
-
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}
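dmaengine_terminate_all() may sleep, which is why the old pl011 code dropped the port lock around it; dmaengine_terminate_async() only requests the abort and returns immediately, so it is safe under the lock. Paths that must know the channel is idle (for example before freeing DMA buffers) call dmaengine_terminate_sync() from process context instead. A sketch, assuming a hypothetical foo_port:

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct foo_port {
	spinlock_t lock;
	struct dma_chan *tx_chan;
};

/* Called under p->lock, possibly racing the DMA completion callback:
 * the async terminate never blocks, so no unlock/relock is needed. */
static void foo_abort_tx(struct foo_port *p)
{
	dmaengine_terminate_async(p->tx_chan);
}

/* Teardown, process context, lock not held: the sync variant waits
 * until the channel has really stopped. */
static void foo_release_tx(struct foo_port *p)
{
	dmaengine_terminate_sync(p->tx_chan);
}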
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index a32f0d2afd59..205c31a61684 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Freescale linflexuart serial port driver
+ * Freescale LINFlexD UART serial port driver
*
* Copyright 2012-2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
@@ -940,5 +940,5 @@ static void __exit linflex_serial_exit(void)
module_init(linflex_serial_init);
module_exit(linflex_serial_exit);
-MODULE_DESCRIPTION("Freescale linflex serial port driver");
+MODULE_DESCRIPTION("Freescale LINFlexD serial port driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 537896c4d887..4e128d19e0ad 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -437,8 +437,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
}
sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
- sport->dma_tx_nents,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ ret, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
if (!sport->dma_tx_desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
dev_err(dev, "Cannot prepare TX slave DMA!\n");
@@ -1280,6 +1280,57 @@ static int lpuart_config_rs485(struct uart_port *port,
return 0;
}
+static int lpuart32_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+
+ unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
+ & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
+ lpuart32_write(&sport->port, modem, UARTMODIR);
+
+ /* clear unsupported configurations */
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+ rs485->flags &= ~SER_RS485_RX_DURING_TX;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Enable auto RS-485 RTS mode */
+ modem |= UARTMODEM_TXRTSE;
+
+ /*
+ * RTS needs to be logic HIGH either during transfer _or_ after
+ * transfer, other variants are not supported by the hardware.
+ */
+
+ if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND)))
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+
+ if (rs485->flags & SER_RS485_RTS_ON_SEND &&
+ rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+
+ /*
+ * The hardware defaults to RTS logic HIGH while transfer.
+ * Switch polarity in case RTS shall be logic HIGH
+ * after transfer.
+ * Note: UART is assumed to be active high.
+ */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem |= UARTMODEM_TXRTSPOL;
+ }
+
+ /* Store the new configuration */
+ sport->port.rs485 = *rs485;
+
+ lpuart32_write(&sport->port, modem, UARTMODIR);
+ return 0;
+}
+
static unsigned int lpuart_get_mctrl(struct uart_port *port)
{
unsigned int temp = 0;
@@ -1333,18 +1384,7 @@ static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- unsigned long temp;
-
- temp = lpuart32_read(port, UARTMODIR) &
- ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
-
- if (mctrl & TIOCM_RTS)
- temp |= UARTMODIR_RXRTSE;
-
- if (mctrl & TIOCM_CTS)
- temp |= UARTMODIR_TXCTSE;
- lpuart32_write(port, temp, UARTMODIR);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
@@ -1889,11 +1929,18 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl |= UARTCTRL_M;
}
+ /*
+ * When auto RS-485 RTS mode is enabled,
+ * hardware flow control need to be disabled.
+ */
+ if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ termios->c_cflag &= ~CRTSCTS;
+
if (termios->c_cflag & CRTSCTS) {
- modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
+ modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
} else {
termios->c_cflag &= ~CRTSCTS;
- modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+ modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
}
if (termios->c_cflag & CSTOPB)
@@ -2416,7 +2463,10 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
- sport->port.rs485_config = lpuart_config_rs485;
+ if (lpuart_is_32(sport))
+ sport->port.rs485_config = lpuart32_config_rs485;
+ else
+ sport->port.rs485_config = lpuart_config_rs485;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2470,7 +2520,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485.delay_rts_after_send)
dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
- lpuart_config_rs485(&sport->port, &sport->port.rs485);
+ sport->port.rs485_config(&sport->port, &sport->port.rs485);
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
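The lpuart change above passes dma_map_sg()'s return value (the mapped segment count, which the IOMMU may have shrunk by merging entries) to dmaengine_prep_slave_sg(), while dma_unmap_sg() keeps the original entry count. A sketch of the corrected pairing:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
foo_prep_tx(struct dma_chan *chan, struct device *dev,
	    struct scatterlist *sgl, int nents)
{
	struct dma_async_tx_descriptor *desc;
	int mapped;

	/* 'mapped' may be smaller than 'nents' if entries were merged */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return NULL;

	desc = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		/* unmap always takes the original count, not 'mapped' */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);

	return desc;
}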
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index ffefd218761e..31033d517e82 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
/* stop activity */
tasklet_kill(&ifx_dev->io_work_tasklet);
+
+ pm_runtime_disable(&spi->dev);
+
/* free irq */
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 5e08f2657b90..a9e20e6c63ad 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -619,7 +619,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
dev_err(dev, "DMA mapping error for TX.\n");
return;
}
- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ desc = dmaengine_prep_slave_sg(chan, sgl, ret,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
@@ -1034,8 +1034,6 @@ static void imx_uart_timeout(struct timer_list *t)
}
}
-#define RX_BUF_SIZE (PAGE_SIZE)
-
/*
* There are two kinds of RX DMA interrupts(such as in the MX6Q):
* [1] the RX DMA buffer is full.
@@ -1118,7 +1116,8 @@ static void imx_uart_dma_rx_callback(void *data)
}
/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
+#define RX_DMA_PERIODS 16
+#define RX_BUF_SIZE (RX_DMA_PERIODS * PAGE_SIZE / 4)
static int imx_uart_start_rx_dma(struct imx_port *sport)
{
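
The two macros above keep each cyclic DMA period at PAGE_SIZE/4 bytes
but quadruple the period count, growing the RX buffer from one page to
four so high baud rates overrun less easily. A plain C sketch of the
arithmetic, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE	4096
#define RX_DMA_PERIODS	16
#define RX_BUF_SIZE	(RX_DMA_PERIODS * PAGE_SIZE / 4)

int main(void)
{
	/* 16 * 4096 / 4 = 16384 bytes total, still 1024 bytes per
	 * period; only the number of in-flight periods grows. */
	printf("buffer %d bytes, period %d bytes\n",
	       RX_BUF_SIZE, RX_BUF_SIZE / RX_DMA_PERIODS);
	return 0;
}
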
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 3657a24913fc..1cbae0768b1f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -301,7 +301,7 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
dma = &msm_port->tx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "tx");
+ dma->chan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan))
goto no_tx;
@@ -344,7 +344,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
dma = &msm_port->rx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "rx");
+ dma->chan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->chan))
goto no_rx;
@@ -980,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int mr;
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
@@ -987,7 +988,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr = msm_read(port, UART_MR1);
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
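
dma_request_slave_channel_reason() was only ever an alias for
dma_request_chan(), so the rename above is mechanical; the call still
returns an ERR_PTR (possibly -EPROBE_DEFER) rather than NULL. A hedged
kernel-style sketch of the pattern, with hypothetical foo_* names:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_request_tx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */
	*out = chan;
	return 0;
}
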
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 6157213a8359..c16234bca78f 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -233,6 +233,7 @@ struct eg20t_port {
struct dma_chan *chan_rx;
struct scatterlist *sg_tx_p;
int nent;
+ int orig_nent;
struct scatterlist sg_rx;
int tx_dma_use;
void *rx_buf_virt;
@@ -787,9 +788,10 @@ static void pch_dma_tx_complete(void *arg)
}
xmit->tail &= UART_XMIT_SIZE - 1;
async_tx_ack(priv->desc_tx);
- dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
+ dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
+ priv->orig_nent = 0;
kfree(priv->sg_tx_p);
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
}
@@ -1010,6 +1012,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
return 0;
}
+ priv->orig_nent = num;
priv->nent = nent;
for (i = 0; i < nent; i++, sg++) {
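
The orig_nent bookkeeping above matters because dma_map_sg() may merge
scatterlist entries and return fewer than it was given, while
dma_unmap_sg() must be called with the original entry count. A short
sketch of that contract (foo_tx() and its arguments are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void foo_tx(struct device *dev, struct scatterlist *sgl, int num)
{
	int nent = dma_map_sg(dev, sgl, num, DMA_TO_DEVICE);

	if (!nent)
		return;			/* mapping failed */
	/* ... submit the nent (possibly merged) entries ... */

	/* Unmap with the original count, never the merged one. */
	dma_unmap_sg(dev, sgl, num, DMA_TO_DEVICE);
}
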
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 14c6306bc462..ff63728a95f4 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -9,10 +9,12 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -115,6 +117,7 @@ struct qcom_geni_serial_port {
bool brk;
unsigned int tx_remaining;
+ int wakeup_irq;
};
static const struct uart_ops qcom_geni_console_pops;
@@ -754,6 +757,15 @@ out_write_wakeup:
uart_write_wakeup(uport);
}
+static irqreturn_t qcom_geni_serial_wakeup_isr(int isr, void *dev)
+{
+ struct uart_port *uport = dev;
+
+ pm_wakeup_event(uport->dev, 2000);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
@@ -830,7 +842,7 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
if (uart_console(uport))
console_stop(uport->cons);
- free_irq(uport->irq, uport);
+ disable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
@@ -890,21 +902,14 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
int ret;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- scnprintf(port->name, sizeof(port->name),
- "qcom_serial_%s%d",
- (uart_console(uport) ? "console" : "uart"), uport->line);
-
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
if (ret)
return ret;
}
+ enable_irq(uport->irq);
- ret = request_irq(uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH,
- port->name, uport);
- if (ret)
- dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
- return ret;
+ return 0;
}
static unsigned long get_clk_cfg(unsigned long clk_freq)
@@ -1297,11 +1302,44 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+ scnprintf(port->name, sizeof(port->name), "qcom_geni_serial_%s%d",
+ (uart_console(uport) ? "console" : "uart"), uport->line);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
uport->irq = irq;
+ irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
+ IRQF_TRIGGER_HIGH, port->name, uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ return ret;
+ }
+
+ if (!console) {
+ port->wakeup_irq = platform_get_irq(pdev, 1);
+ if (port->wakeup_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get wakeup IRQ %d\n",
+ port->wakeup_irq);
+ } else {
+ irq_set_status_flags(port->wakeup_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, port->wakeup_irq,
+ qcom_geni_serial_wakeup_isr,
+ IRQF_TRIGGER_FALLING, "uart_wakeup", uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to register wakeup IRQ ret %d\n",
+ ret);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, port->wakeup_irq);
+ if (unlikely(ret))
+ dev_err(uport->dev, "%s: Failed to set IRQ wake: %d\n",
+ __func__, ret);
+ }
+ }
uport->private_data = drv;
platform_set_drvdata(pdev, port);
port->handle_rx = console ? handle_rx_console : handle_rx_uart;
@@ -1324,7 +1362,12 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- return uart_suspend_port(uport->private_data, uport);
+ uart_suspend_port(uport->private_data, uport);
+
+ if (port->wakeup_irq > 0)
+ enable_irq(port->wakeup_irq);
+
+ return 0;
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
@@ -1332,6 +1375,9 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ if (port->wakeup_irq > 0)
+ disable_irq(port->wakeup_irq);
+
return uart_resume_port(uport->private_data, uport);
}
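
The wakeup path above follows the stock dedicated-wake-IRQ recipe:
request the second interrupt with auto-enable suppressed, mark the
device wakeup-capable, and hand the IRQ to the PM core. A condensed
sketch under the same assumptions (foo_* names are hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static irqreturn_t foo_wake_isr(int irq, void *data)
{
	struct device *dev = data;

	/* Hold a 2 s wakeup event so the system cannot re-suspend
	 * before the pending RX data is consumed. */
	pm_wakeup_event(dev, 2000);
	return IRQ_HANDLED;
}

static int foo_setup_wake(struct device *dev, int wake_irq)
{
	int ret;

	irq_set_status_flags(wake_irq, IRQ_NOAUTOEN);	/* armed for suspend only */
	ret = devm_request_irq(dev, wake_irq, foo_wake_isr,
			       IRQF_TRIGGER_FALLING, "foo_wake", dev);
	if (ret)
		return ret;
	device_init_wakeup(dev, true);
	return dev_pm_set_wake_irq(dev, wake_irq);
}
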
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung_tty.c
index 83fd51607741..83fd51607741 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung_tty.c
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 2f599515c133..b6ace6290e23 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1122,8 +1122,7 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
int ret;
struct dma_slave_config dma_sconfig;
- dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
dev_err(tup->uport.dev,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index c4a414a46c7f..b0a6eb106edb 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1111,7 +1111,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
if (!uport)
goto out;
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
ret = 0;
out:
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 004ca684d3ae..637b09d3fe79 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -120,7 +120,8 @@ static u32 uart_usp_ff_empty_mask(struct uart_port *port)
empty_bit = ilog2(port->fifosize) + 1;
return (1 << empty_bit);
}
-struct sirfsoc_uart_register sirfsoc_usp = {
+
+static struct sirfsoc_uart_register sirfsoc_usp = {
.uart_reg = {
.sirfsoc_mode1 = 0x0000,
.sirfsoc_mode2 = 0x0004,
@@ -186,7 +187,7 @@ struct sirfsoc_uart_register sirfsoc_usp = {
},
};
-struct sirfsoc_uart_register sirfsoc_uart = {
+static struct sirfsoc_uart_register sirfsoc_uart = {
.uart_reg = {
.sirfsoc_line_ctrl = 0x0040,
.sirfsoc_tx_rx_en = 0x004c,
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 771d11196523..31df23502562 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -919,6 +919,34 @@ static void sprd_pm(struct uart_port *port, unsigned int state,
}
}
+#ifdef CONFIG_CONSOLE_POLL
+static int sprd_poll_init(struct uart_port *port)
+{
+ if (port->state->pm_state != UART_PM_STATE_ON) {
+ sprd_pm(port, UART_PM_STATE_ON, 0);
+ port->state->pm_state = UART_PM_STATE_ON;
+ }
+
+ return 0;
+}
+
+static int sprd_poll_get_char(struct uart_port *port)
+{
+ while (!(serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK))
+ cpu_relax();
+
+ return serial_in(port, SPRD_RXD);
+}
+
+static void sprd_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK)
+ cpu_relax();
+
+ serial_out(port, SPRD_TXD, ch);
+}
+#endif
+
static const struct uart_ops serial_sprd_ops = {
.tx_empty = sprd_tx_empty,
.get_mctrl = sprd_get_mctrl,
@@ -936,6 +964,11 @@ static const struct uart_ops serial_sprd_ops = {
.config_port = sprd_config_port,
.verify_port = sprd_verify_port,
.pm = sprd_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = sprd_poll_init,
+ .poll_get_char = sprd_poll_get_char,
+ .poll_put_char = sprd_poll_put_char,
+#endif
};
#ifdef CONFIG_SERIAL_SPRD_CONSOLE
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index df90747ee3a8..2f72514d63ed 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -240,8 +240,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
* cleared by the sequence [read SR - read DR].
*/
if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
- stm32_clr_bits(port, ofs->icr, USART_ICR_ORECF |
- USART_ICR_PECF | USART_ICR_FECF);
+ writel_relaxed(sr & USART_SR_ERR_MASK,
+ port->membase + ofs->icr);
c = stm32_get_char(port, &sr, &stm32_port->last_res);
port->icount.rx++;
@@ -435,7 +435,7 @@ static void stm32_transmit_chars(struct uart_port *port)
if (ofs->icr == UNDEF_REG)
stm32_clr_bits(port, ofs->isr, USART_SR_TC);
else
- stm32_set_bits(port, ofs->icr, USART_ICR_TCCF);
+ writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
if (stm32_port->tx_ch)
stm32_transmit_chars_dma(port);
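
Both hunks above rely on USART_ICR being write-one-to-clear: writing
just the observed bits acks them without the read-modify-write cycle of
the set/clr helpers, which could also wipe flags raised in between. A
tiny sketch of the semantics (foo_ack() is illustrative):

#include <linux/io.h>
#include <linux/types.h>

static void foo_ack(void __iomem *icr, u32 seen)
{
	/* W1C register: 1-bits clear their flag, 0-bits are ignored,
	 * so concurrent, not-yet-observed flags survive. */
	writel_relaxed(seen, icr);
}
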
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 06e79c11141d..7dbd0c471d92 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -22,7 +22,6 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
@@ -55,7 +54,6 @@
#define ULITE_CONTROL_RST_TX 0x01
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
-#define UART_AUTOSUSPEND_TIMEOUT 3000
/* Static pointer to console port */
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -65,7 +63,6 @@ static struct uart_port *console_port;
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
struct clk *clk;
- struct uart_driver *ulite_uart_driver;
};
struct uartlite_reg_ops {
@@ -393,12 +390,12 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
static void ulite_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- if (!state) {
- pm_runtime_get_sync(port->dev);
- } else {
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
- }
+ struct uartlite_data *pdata = port->private_data;
+
+ if (!state)
+ clk_enable(pdata->clk);
+ else
+ clk_disable(pdata->clk);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -697,9 +694,7 @@ static int ulite_release(struct device *dev)
int rc = 0;
if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- rc = uart_remove_one_port(pdata->ulite_uart_driver, port);
+ rc = uart_remove_one_port(&ulite_uart_driver, port);
dev_set_drvdata(dev, NULL);
port->mapbase = 0;
}
@@ -717,11 +712,8 @@ static int __maybe_unused ulite_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- uart_suspend_port(pdata->ulite_uart_driver, port);
- }
+ if (port)
+ uart_suspend_port(&ulite_uart_driver, port);
return 0;
}
@@ -736,41 +728,17 @@ static int __maybe_unused ulite_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- uart_resume_port(pdata->ulite_uart_driver, port);
- }
+ if (port)
+ uart_resume_port(&ulite_uart_driver, port);
return 0;
}
-static int __maybe_unused ulite_runtime_suspend(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct uartlite_data *pdata = port->private_data;
-
- clk_disable(pdata->clk);
- return 0;
-};
-
-static int __maybe_unused ulite_runtime_resume(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct uartlite_data *pdata = port->private_data;
-
- clk_enable(pdata->clk);
- return 0;
-}
/* ---------------------------------------------------------------------
* Platform bus binding
*/
-static const struct dev_pm_ops ulite_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ulite_suspend, ulite_resume)
- SET_RUNTIME_PM_OPS(ulite_runtime_suspend,
- ulite_runtime_resume, NULL)
-};
+static SIMPLE_DEV_PM_OPS(ulite_pm_ops, ulite_suspend, ulite_resume);
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
@@ -795,22 +763,6 @@ static int ulite_probe(struct platform_device *pdev)
if (prop)
id = be32_to_cpup(prop);
#endif
- if (id < 0) {
- /* Look for a serialN alias */
- id = of_alias_get_id(pdev->dev.of_node, "serial");
- if (id < 0)
- id = 0;
- }
-
- if (!ulite_uart_driver.state) {
- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
- ret = uart_register_driver(&ulite_uart_driver);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
- return ret;
- }
- }
-
pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
GFP_KERNEL);
if (!pdata)
@@ -836,22 +788,24 @@ static int ulite_probe(struct platform_device *pdev)
pdata->clk = NULL;
}
- pdata->ulite_uart_driver = &ulite_uart_driver;
ret = clk_prepare_enable(pdata->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
return ret;
}
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ if (!ulite_uart_driver.state) {
+ dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(&ulite_uart_driver);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register driver\n");
+ return ret;
+ }
+ }
ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ clk_disable(pdata->clk);
return ret;
}
@@ -860,14 +814,9 @@ static int ulite_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
struct uartlite_data *pdata = port->private_data;
- int rc;
- clk_unprepare(pdata->clk);
- rc = ulite_release(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- return rc;
+ clk_disable_unprepare(pdata->clk);
+ return ulite_release(&pdev->dev);
}
/* work with hotplug and coldplug */
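
The revert above trades runtime PM for direct clock gating in the
->pm() hook. Because probe already did clk_prepare_enable() (followed
by clk_disable()), the hook only needs the non-sleeping enable/disable
half, and every enable must still be balanced by a disable. A condensed
sketch (foo_pm() is illustrative):

#include <linux/clk.h>

static void foo_pm(struct clk *clk, unsigned int state)
{
	if (state == 0)		/* UART_PM_STATE_ON: port in use */
		clk_enable(clk);
	else			/* powered down: gate the clock */
		clk_disable(clk);
}
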
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a81807b394d1..d9f54c7d94f2 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1345,9 +1345,12 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
if (!tty->port)
tty->port = driver->ports[idx];
- WARN_RATELIMIT(!tty->port,
- "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
- __func__, tty->driver->name);
+ if (WARN_RATELIMIT(!tty->port,
+ "%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
+ __func__, tty->driver->name)) {
+ retval = -EINVAL;
+ goto err_release_lock;
+ }
retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval)
@@ -1925,7 +1928,6 @@ EXPORT_SYMBOL_GPL(tty_kopen);
/**
* tty_open_by_driver - open a tty device
* @device: dev_t of device to open
- * @inode: inode of device file
* @filp: file pointer to tty
*
* Performs the driver lookup, checks for a reopen, or otherwise
@@ -1938,7 +1940,7 @@ EXPORT_SYMBOL_GPL(tty_kopen);
* - concurrent tty driver removal w/ lookup
* - concurrent tty removal from driver table
*/
-static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+static struct tty_struct *tty_open_by_driver(dev_t device,
struct file *filp)
{
struct tty_struct *tty;
@@ -2030,7 +2032,7 @@ retry_open:
tty = tty_open_current_tty(device, filp);
if (!tty)
- tty = tty_open_by_driver(device, inode, filp);
+ tty = tty_open_by_driver(device, filp);
if (IS_ERR(tty)) {
tty_free_file(filp);
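
The tty->port fix above leans on WARN_RATELIMIT() evaluating to its
condition, which lets one statement both warn (ratelimited) and gate
the error path. A minimal sketch of the idiom (foo_check() is
illustrative):

#include <linux/errno.h>
#include <linux/ratelimit.h>

static int foo_check(const void *port)
{
	if (WARN_RATELIMIT(!port, "driver did not set the port\n"))
		return -EINVAL;	/* warned and refused, no crash later */
	return 0;
}
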
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4c49f53afa3e..ec1f6a48121e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -156,12 +156,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* takes tty_ldiscs_lock to guard against ldisc races
*/
-#if defined(CONFIG_LDISC_AUTOLOAD)
- #define INITIAL_AUTOLOAD_STATE 1
-#else
- #define INITIAL_AUTOLOAD_STATE 0
-#endif
-static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
+static int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
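
IS_BUILTIN(CONFIG_LDISC_AUTOLOAD) evaluates to 1 exactly when the
option is =y, so for a bool symbol it reproduces the old #if block in
one line (IS_ENABLED() would additionally cover =m). A sketch of the
resulting initializer:

#include <linux/kconfig.h>

/* 1 when CONFIG_LDISC_AUTOLOAD=y, 0 when it is unset. */
static int autoload_default = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
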
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 515fc095e3b4..15d33fa0c925 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1491,7 +1491,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
kbd_rawcode(value);
- if (event_type == EV_KEY)
+ if (event_type == EV_KEY && event_code <= KEY_MAX)
kbd_keycode(event_code, value, HW_RAW(handle->dev));
spin_unlock(&kbd_event_lock);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 1f042346e722..778f83ea2249 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -456,6 +456,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
size_t ret;
char *con_buf;
+ if (use_unicode(inode))
+ return -EOPNOTSUPP;
+
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9e26b0143a59..9ae2a7a93df2 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -234,7 +234,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* UHCI */
int region;
- for (region = 0; region < PCI_ROM_RESOURCE; region++) {
+ for (region = 0; region < PCI_STD_NUM_BARS; region++) {
if (!(pci_resource_flags(dev, region) &
IORESOURCE_IO))
continue;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1709895387b9..f229ad6952c0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -18,6 +18,7 @@
#include <linux/sched/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/kcov.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
@@ -5484,6 +5485,8 @@ static void hub_event(struct work_struct *work)
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
+ kcov_remote_start_usb((u64)hdev->bus->busnum);
+
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hdev->maxchild,
/* NOTE: expects max 15 ports... */
@@ -5590,6 +5593,8 @@ out_hdev_lock:
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
kref_put(&hub->kref, hub_release);
+
+ kcov_remote_stop();
}
static const struct usb_device_id hub_id_table[] = {
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f6d04491df60..6c7f0a876b96 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -728,7 +728,7 @@ static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
if (!pio_enabled(pdev))
return;
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
base = pci_resource_start(pdev, i);
break;
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 8b1b73065421..98c1aa594e6c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -561,7 +561,7 @@ static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
residue = min(residue, transfer_length);
if (us->srb != NULL)
scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
- (int)residue));
+ residue));
}
if (bcs->Status != US_BULK_STAT_OK)
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 96cb0409dd89..238a8088e17f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1284,8 +1284,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
} else {
residue = min(residue, transfer_length);
- scsi_set_resid(srb, max(scsi_get_resid(srb),
- (int) residue));
+ scsi_set_resid(srb, max(scsi_get_resid(srb), residue));
}
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 475b9c692827..95bba3ba6ac6 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -869,7 +869,6 @@ static struct scsi_host_template uas_host_template = {
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
- .sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
.dma_boundary = PAGE_SIZE - 1,
};
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 02206162eaa9..379a02c36e37 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -110,13 +110,15 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
struct resource *res;
- int bar;
+ int i;
struct vfio_pci_dummy_resource *dummy_res;
INIT_LIST_HEAD(&vdev->dummy_resources_list);
- for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
- res = vdev->pdev->resource + bar;
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ res = &vdev->pdev->resource[bar];
if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
goto no_mmap;
@@ -399,7 +401,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vfio_config_free(vdev);
- for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ bar = i + PCI_STD_RESOURCES;
if (!vdev->barmap[bar])
continue;
pci_iounmap(pdev, vdev->barmap[bar]);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index f0891bd8444c..90c0b80f8acf 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -450,30 +450,32 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int i;
- __le32 *bar;
+ __le32 *vbar;
u64 mask;
- bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
+ vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
- for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
- if (!pci_resource_start(pdev, i)) {
- *bar = 0; /* Unmapped by host = unimplemented to user */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ if (!pci_resource_start(pdev, bar)) {
+ *vbar = 0; /* Unmapped by host = unimplemented to user */
continue;
}
- mask = ~(pci_resource_len(pdev, i) - 1);
+ mask = ~(pci_resource_len(pdev, bar) - 1);
- *bar &= cpu_to_le32((u32)mask);
- *bar |= vfio_generate_bar_flags(pdev, i);
+ *vbar &= cpu_to_le32((u32)mask);
+ *vbar |= vfio_generate_bar_flags(pdev, bar);
- if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
- bar++;
- *bar &= cpu_to_le32((u32)(mask >> 32));
+ if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ vbar++;
+ *vbar &= cpu_to_le32((u32)(mask >> 32));
i++;
}
}
- bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
+ vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
/*
* NB. REGION_INFO will have reported zero size if we weren't able
@@ -483,14 +485,14 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
- *bar &= cpu_to_le32((u32)mask);
+ *vbar &= cpu_to_le32((u32)mask);
} else if (pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW) {
mask = ~(0x20000 - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
- *bar &= cpu_to_le32((u32)mask);
+ *vbar &= cpu_to_le32((u32)mask);
} else
- *bar = 0;
+ *vbar = 0;
vdev->bardirty = false;
}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3fa3f728fb39..2056f3f85f59 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -294,8 +294,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
irq = pci_irq_vector(pdev, vector);
if (vdev->ctx[vector].trigger) {
- free_irq(irq, vdev->ctx[vector].trigger);
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+ free_irq(irq, vdev->ctx[vector].trigger);
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index ee6ee91718a4..8a2c7607d513 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -86,8 +86,8 @@ struct vfio_pci_reflck {
struct vfio_pci_device {
struct pci_dev *pdev;
- void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
- bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1];
+ void __iomem *barmap[PCI_STD_NUM_BARS];
+ bool bar_mmap_supported[PCI_STD_NUM_BARS];
u8 *pci_config_map;
u8 *vconfig;
struct perm_bits *msi_perm;
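
All of the vfio conversions above move from the inclusive
PCI_STD_RESOURCE_END bound to the sized PCI_STD_NUM_BARS (6), which
reads as an ordinary counted loop. A sketch of the resulting idiom
(foo_scan_bars() is illustrative):

#include <linux/pci.h>

static void foo_scan_bars(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {	/* BARs 0..5 */
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
			dev_dbg(&pdev->dev, "BAR%d is MMIO\n", i);
	}
}
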
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index d864277ea16f..2ada8e6cdb88 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -294,31 +294,13 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
* Some mappings aren't backed by a struct page, for example an mmap'd
* MMIO range for our own or another device. These use a different
* pfn conversion and shouldn't be tracked as locked pages.
+ * For compound pages, any driver that sets the reserved bit in the
+ * head page needs to set the reserved bit in all subpages to be safe.
*/
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
- if (pfn_valid(pfn)) {
- bool reserved;
- struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_head(tail);
- reserved = !!(PageReserved(head));
- if (head != tail) {
- /*
- * "head" is not a dangling pointer
- * (compound_head takes care of that)
- * but the hugepage may have been split
- * from under us (and we may not hold a
- * reference count on the head page so it can
- * be reused before we run PageReferenced), so
- * we've to check PageTail before returning
- * what we just read.
- */
- smp_rmb();
- if (PageTail(tail))
- return reserved;
- }
- return PageReserved(tail);
- }
+ if (pfn_valid(pfn))
+ return PageReserved(pfn_to_page(pfn));
return true;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 36ca2cf419bf..f44340b41494 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -30,6 +30,7 @@
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
+#include <linux/kcov.h>
#include "vhost.h"
@@ -357,7 +358,9 @@ static int vhost_worker(void *data)
llist_for_each_entry_safe(work, work_next, node, node) {
clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
+ kcov_remote_start_common(dev->kcov_handle);
work->fn(work);
+ kcov_remote_stop();
if (need_resched())
schedule();
}
@@ -546,6 +549,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
/* No owner, become one */
dev->mm = get_task_mm(current);
+ dev->kcov_handle = kcov_common_handle();
worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
@@ -571,6 +575,7 @@ err_worker:
if (dev->mm)
mmput(dev->mm);
dev->mm = NULL;
+ dev->kcov_handle = 0;
err_mm:
return err;
}
@@ -682,6 +687,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
if (dev->worker) {
kthread_stop(dev->worker);
dev->worker = NULL;
+ dev->kcov_handle = 0;
}
if (dev->mm)
mmput(dev->mm);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e9ed2722b633..a123fd70847e 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,6 +173,7 @@ struct vhost_dev {
int iov_limit;
int weight;
int byte_weight;
+ u64 kcov_handle;
};
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
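
The kcov hooks above let a fuzzer attribute coverage collected in the
vhost worker thread back to the userspace process that opened the
device: the handle is captured at VHOST_SET_OWNER time and replayed
around each work item. A condensed sketch (foo_run_work() is
illustrative):

#include <linux/kcov.h>

static void foo_run_work(u64 kcov_handle, void (*fn)(void *), void *arg)
{
	kcov_remote_start_common(kcov_handle);	/* credit the owner */
	fn(arg);				/* coverage lands there */
	kcov_remote_stop();
}
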
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 2dc5703eac51..7c4483c7f313 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -2593,7 +2593,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
* calling pci_set_power_state()
*/
radeonfb_whack_power_state(rinfo, PCI_D2);
- __pci_complete_power_transition(rinfo->pdev, PCI_D2);
+ pci_platform_power_transition(rinfo->pdev, PCI_D2);
} else {
printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n",
pci_name(rinfo->pdev));
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 95c32952fa8a..6f6fc785b545 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1772,7 +1772,7 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
bool primary = false;
int err, idx, bar;
- for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
idx++;
@@ -1782,7 +1782,7 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
if (!ap)
return -ENOMEM;
- for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
ap->ranges[idx].base = pci_resource_start(pdev, bar);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 51d97ec4f58f..1caa3726cb45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -653,7 +653,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
if (!base)
return;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *res = &dev->resource[i];
if (!(res->flags & IORESOURCE_MEM))
diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore
index e48355f538fa..9dda1b26b2e4 100644
--- a/drivers/video/logo/.gitignore
+++ b/drivers/video/logo/.gitignore
@@ -5,3 +5,4 @@
*_vga16.c
*_clut224.c
*_gray256.c
+pnmtologo
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 16f60c1e1766..bcda657493a4 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -18,24 +18,19 @@ obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
# How to generate logos
-pnmtologo := scripts/pnmtologo
+hostprogs-y := pnmtologo
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
quiet_cmd_logo = LOGO $@
- cmd_logo = $(pnmtologo) \
- -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
- -n $(notdir $(basename $<)) -o $@ $<
+ cmd_logo = $(obj)/pnmtologo -t $(lastword $(subst _, ,$*)) -n $* -o $@ $<
-$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.pbm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
-$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.ppm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
-$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
- $(call if_changed,logo)
-
-$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.pgm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
# generated C files
diff --git a/drivers/video/logo/pnmtologo.c b/drivers/video/logo/pnmtologo.c
new file mode 100644
index 000000000000..4718d7895f0b
--- /dev/null
+++ b/drivers/video/logo/pnmtologo.c
@@ -0,0 +1,514 @@
+/*
+ * Convert a logo in ASCII PNM format to C source suitable for inclusion in
+ * the Linux kernel
+ *
+ * (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * --------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+static const char *programname;
+static const char *filename;
+static const char *logoname = "linux_logo";
+static const char *outputname;
+static FILE *out;
+
+
+#define LINUX_LOGO_MONO 1 /* monochrome black/white */
+#define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */
+#define LINUX_LOGO_CLUT224 3 /* 224 colors */
+#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */
+
+static const char *logo_types[LINUX_LOGO_GRAY256+1] = {
+ [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
+ [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
+ [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
+ [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
+};
+
+#define MAX_LINUX_LOGO_COLORS 224
+
+struct color {
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
+};
+
+static const struct color clut_vga16[16] = {
+ { 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0xaa },
+ { 0x00, 0xaa, 0x00 },
+ { 0x00, 0xaa, 0xaa },
+ { 0xaa, 0x00, 0x00 },
+ { 0xaa, 0x00, 0xaa },
+ { 0xaa, 0x55, 0x00 },
+ { 0xaa, 0xaa, 0xaa },
+ { 0x55, 0x55, 0x55 },
+ { 0x55, 0x55, 0xff },
+ { 0x55, 0xff, 0x55 },
+ { 0x55, 0xff, 0xff },
+ { 0xff, 0x55, 0x55 },
+ { 0xff, 0x55, 0xff },
+ { 0xff, 0xff, 0x55 },
+ { 0xff, 0xff, 0xff },
+};
+
+
+static int logo_type = LINUX_LOGO_CLUT224;
+static unsigned int logo_width;
+static unsigned int logo_height;
+static struct color **logo_data;
+static struct color logo_clut[MAX_LINUX_LOGO_COLORS];
+static unsigned int logo_clutsize;
+static int is_plain_pbm = 0;
+
+static void die(const char *fmt, ...)
+ __attribute__ ((noreturn)) __attribute__ ((format (printf, 1, 2)));
+static void usage(void) __attribute__ ((noreturn));
+
+
+static unsigned int get_number(FILE *fp)
+{
+ int c, val;
+
+ /* Skip leading whitespace */
+ do {
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ if (c == '#') {
+ /* Ignore comments until end of line */
+ do {
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ } while (c != '\n');
+ }
+ } while (isspace(c));
+
+ /* Parse decimal number */
+ val = 0;
+ while (isdigit(c)) {
+ val = 10*val+c-'0';
+ /* Some PBMs are 'broken': GIMP, for example, exports a PBM without
+ * spaces between the digits. This is fine because a PBM digit can
+ * only be a '1' or a '0'. */
+ if (is_plain_pbm)
+ break;
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ }
+ return val;
+}
+
+static unsigned int get_number255(FILE *fp, unsigned int maxval)
+{
+ unsigned int val = get_number(fp);
+ return (255*val+maxval/2)/maxval;
+}
+
+static void read_image(void)
+{
+ FILE *fp;
+ unsigned int i, j;
+ int magic;
+ unsigned int maxval;
+
+ /* open image file */
+ fp = fopen(filename, "r");
+ if (!fp)
+ die("Cannot open file %s: %s\n", filename, strerror(errno));
+
+ /* check file type and read file header */
+ magic = fgetc(fp);
+ if (magic != 'P')
+ die("%s is not a PNM file\n", filename);
+ magic = fgetc(fp);
+ switch (magic) {
+ case '1':
+ case '2':
+ case '3':
+ /* Plain PBM/PGM/PPM */
+ break;
+
+ case '4':
+ case '5':
+ case '6':
+ /* Binary PBM/PGM/PPM */
+ die("%s: Binary PNM is not supported\n"
+ "Use pnmnoraw(1) to convert it to ASCII PNM\n", filename);
+
+ default:
+ die("%s is not a PNM file\n", filename);
+ }
+ logo_width = get_number(fp);
+ logo_height = get_number(fp);
+
+ /* allocate image data */
+ logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
+ if (!logo_data)
+ die("%s\n", strerror(errno));
+ for (i = 0; i < logo_height; i++) {
+ logo_data[i] = malloc(logo_width*sizeof(struct color));
+ if (!logo_data[i])
+ die("%s\n", strerror(errno));
+ }
+
+ /* read image data */
+ switch (magic) {
+ case '1':
+ /* Plain PBM */
+ is_plain_pbm = 1;
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = 255*(1-get_number(fp));
+ break;
+
+ case '2':
+ /* Plain PGM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ break;
+
+ case '3':
+ /* Plain PPM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ logo_data[i][j].red = get_number255(fp, maxval);
+ logo_data[i][j].green = get_number255(fp, maxval);
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ }
+ break;
+ }
+
+ /* close file */
+ fclose(fp);
+}
+
+static inline int is_black(struct color c)
+{
+ return c.red == 0 && c.green == 0 && c.blue == 0;
+}
+
+static inline int is_white(struct color c)
+{
+ return c.red == 255 && c.green == 255 && c.blue == 255;
+}
+
+static inline int is_gray(struct color c)
+{
+ return c.red == c.green && c.red == c.blue;
+}
+
+static inline int is_equal(struct color c1, struct color c2)
+{
+ return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
+}
+
+static void write_header(void)
+{
+ /* open logo file */
+ if (outputname) {
+ out = fopen(outputname, "w");
+ if (!out)
+ die("Cannot create file %s: %s\n", outputname, strerror(errno));
+ } else {
+ out = stdout;
+ }
+
+ fputs("/*\n", out);
+ fputs(" * DO NOT EDIT THIS FILE!\n", out);
+ fputs(" *\n", out);
+ fprintf(out, " * It was automatically generated from %s\n", filename);
+ fputs(" *\n", out);
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+ logoname);
+}
+
+static void write_footer(void)
+{
+ fputs("\n};\n\n", out);
+ fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+ if (logo_type == LINUX_LOGO_CLUT224) {
+ fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+ fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
+ }
+ fprintf(out, "\t.data\t\t= %s_data\n", logoname);
+ fputs("};\n\n", out);
+
+ /* close logo file */
+ if (outputname)
+ fclose(out);
+}
+
+static int write_hex_cnt;
+
+static void write_hex(unsigned char byte)
+{
+ if (write_hex_cnt % 12)
+ fprintf(out, ", 0x%02x", byte);
+ else if (write_hex_cnt)
+ fprintf(out, ",\n\t0x%02x", byte);
+ else
+ fprintf(out, "\t0x%02x", byte);
+ write_hex_cnt++;
+}
+
+static void write_logo_mono(void)
+{
+ unsigned int i, j;
+ unsigned char val, bit;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
+ die("Image must be monochrome\n");
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++) {
+ for (j = 0; j < logo_width;) {
+ for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
+ if (logo_data[i][j].red)
+ val |= bit;
+ write_hex(val);
+ }
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void write_logo_vga16(void)
+{
+ unsigned int i, j, k;
+ unsigned char val;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ if (k == 16)
+ die("Image must use the 16 console colors only\n"
+ "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
+ "of colors\n");
+ }
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val = k<<4;
+ if (++j < logo_width) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val |= k;
+ }
+ write_hex(val);
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void write_logo_clut224(void)
+{
+ unsigned int i, j, k;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ if (k == logo_clutsize) {
+ if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
+ die("Image has more than %d colors\n"
+ "Use ppmquant(1) to reduce the number of colors\n",
+ MAX_LINUX_LOGO_COLORS);
+ logo_clut[logo_clutsize++] = logo_data[i][j];
+ }
+ }
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ write_hex(k+32);
+ }
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+ write_hex(logo_clut[i].red);
+ write_hex(logo_clut[i].green);
+ write_hex(logo_clut[i].blue);
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void write_logo_gray256(void)
+{
+ unsigned int i, j;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_gray(logo_data[i][j]))
+ die("Image must be grayscale\n");
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ write_hex(logo_data[i][j].red);
+
+ /* write logo structure and file footer */
+ write_footer();
+}
+
+static void die(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ exit(1);
+}
+
+static void usage(void)
+{
+ die("\n"
+ "Usage: %s [options] <filename>\n"
+ "\n"
+ "Valid options:\n"
+ " -h : display this usage information\n"
+ " -n <name> : specify logo name (default: linux_logo)\n"
+ " -o <output> : output to file <output> instead of stdout\n"
+ " -t <type> : specify logo type, one of\n"
+ " mono : monochrome black/white\n"
+ " vga16 : 16 colors VGA text palette\n"
+ " clut224 : 224 colors (default)\n"
+ " gray256 : 256 levels grayscale\n"
+ "\n", programname);
+}
+
+int main(int argc, char *argv[])
+{
+ int opt;
+
+ programname = argv[0];
+
+ opterr = 0;
+ while (1) {
+ opt = getopt(argc, argv, "hn:o:t:");
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'h':
+ usage();
+ break;
+
+ case 'n':
+ logoname = optarg;
+ break;
+
+ case 'o':
+ outputname = optarg;
+ break;
+
+ case 't':
+ if (!strcmp(optarg, "mono"))
+ logo_type = LINUX_LOGO_MONO;
+ else if (!strcmp(optarg, "vga16"))
+ logo_type = LINUX_LOGO_VGA16;
+ else if (!strcmp(optarg, "clut224"))
+ logo_type = LINUX_LOGO_CLUT224;
+ else if (!strcmp(optarg, "gray256"))
+ logo_type = LINUX_LOGO_GRAY256;
+ else
+ usage();
+ break;
+
+ default:
+ usage();
+ break;
+ }
+ }
+ if (optind != argc-1)
+ usage();
+
+ filename = argv[optind];
+
+ read_image();
+ switch (logo_type) {
+ case LINUX_LOGO_MONO:
+ write_logo_mono();
+ break;
+
+ case LINUX_LOGO_VGA16:
+ write_logo_vga16();
+ break;
+
+ case LINUX_LOGO_CLUT224:
+ write_logo_clut224();
+ break;
+
+ case LINUX_LOGO_GRAY256:
+ write_logo_gray256();
+ break;
+ }
+ exit(0);
+}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6c8843968a52..499eff7d3f65 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1213,31 +1213,21 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
notify_remote_via_irq(irq);
}
-static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-
static void __xen_evtchn_do_upcall(void)
{
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
- int cpu = get_cpu();
- unsigned count;
+ int cpu = smp_processor_id();
do {
vcpu_info->evtchn_upcall_pending = 0;
- if (__this_cpu_inc_return(xed_nesting_count) - 1)
- goto out;
-
xen_evtchn_handle_events(cpu);
BUG_ON(!irqs_disabled());
- count = __this_cpu_read(xed_nesting_count);
- __this_cpu_write(xed_nesting_count, 0);
- } while (count != 1 || vcpu_info->evtchn_upcall_pending);
-
-out:
+ virt_rmb(); /* Hypervisor can set upcall pending. */
- put_cpu();
+ } while (vcpu_info->evtchn_upcall_pending);
}
void xen_evtchn_do_upcall(struct pt_regs *regs)
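
The rework above replaces the per-CPU nesting counter with a plain
clear-then-recheck loop: clear the pending flag, handle events, then
re-read the flag behind a read barrier in case the hypervisor raised
it again meanwhile. A sketch of the pattern with a hypothetical flag
and handler:

#include <linux/compiler.h>
#include <asm/barrier.h>

static void foo_upcall(int *pending, void (*handle_events)(void))
{
	do {
		WRITE_ONCE(*pending, 0);	/* ack before handling */
		handle_events();
		virt_rmb();	/* order the re-read after handling */
	} while (READ_ONCE(*pending));	/* set again? go around */
}
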
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 91e44c04f787..9a3960ecff6c 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -81,7 +81,7 @@ void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
-bool gntdev_account_mapped_pages(int count);
+bool gntdev_test_page_count(unsigned int count);
int gntdev_map_grant_pages(struct gntdev_grant_map *map);
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 2c4f324f8626..63f0857bf62d 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -446,7 +446,7 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
{
struct gntdev_grant_map *map;
- if (unlikely(count <= 0))
+ if (unlikely(gntdev_test_page_count(count)))
return ERR_PTR(-EINVAL);
if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
@@ -459,11 +459,6 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
if (!map)
return ERR_PTR(-ENOMEM);
- if (unlikely(gntdev_account_mapped_pages(count))) {
- pr_debug("can't map %d pages: over limit\n", count);
- gntdev_put_map(NULL, map);
- return ERR_PTR(-ENOMEM);
- }
return map;
}
@@ -771,7 +766,7 @@ long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
- if (unlikely(op.count <= 0))
+ if (unlikely(gntdev_test_page_count(op.count)))
return -EINVAL;
refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
@@ -818,7 +813,7 @@ long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
- if (unlikely(op.count <= 0))
+ if (unlikely(gntdev_test_page_count(op.count)))
return -EINVAL;
gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a04ddf2a68af..4fc83e3f5ad3 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -55,12 +55,10 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
-static int limit = 1024*1024;
-module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
- "the gntdev device");
-
-static atomic_t pages_mapped = ATOMIC_INIT(0);
+static unsigned int limit = 64*1024;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit,
+ "Maximum number of grants that may be mapped by one mapping request");
static int use_ptemod;
@@ -71,9 +69,9 @@ static struct miscdevice gntdev_miscdev;
/* ------------------------------------------------------------------ */
-bool gntdev_account_mapped_pages(int count)
+bool gntdev_test_page_count(unsigned int count)
{
- return atomic_add_return(count, &pages_mapped) > limit;
+ return !count || count > limit;
}
static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -114,14 +112,14 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
gnttab_free_pages(map->count, map->pages);
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
- kfree(map->frames);
+ kvfree(map->frames);
#endif
- kfree(map->pages);
- kfree(map->grants);
- kfree(map->map_ops);
- kfree(map->unmap_ops);
- kfree(map->kmap_ops);
- kfree(map->kunmap_ops);
+ kvfree(map->pages);
+ kvfree(map->grants);
+ kvfree(map->map_ops);
+ kvfree(map->unmap_ops);
+ kvfree(map->kmap_ops);
+ kvfree(map->kunmap_ops);
kfree(map);
}
@@ -135,12 +133,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
if (NULL == add)
return NULL;
- add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
- add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
- add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
- add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
- add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
- add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+ add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+ add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+ add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+ add->kunmap_ops = kvcalloc(count,
+ sizeof(add->kunmap_ops[0]), GFP_KERNEL);
+ add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
@@ -159,8 +158,8 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
struct gnttab_dma_alloc_args args;
- add->frames = kcalloc(count, sizeof(add->frames[0]),
- GFP_KERNEL);
+ add->frames = kvcalloc(count, sizeof(add->frames[0]),
+ GFP_KERNEL);
if (!add->frames)
goto err;
@@ -241,8 +240,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
- atomic_sub(map->count, &pages_mapped);
-
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
@@ -506,7 +503,6 @@ static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
static int gntdev_open(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv;
- int ret = 0;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -518,16 +514,12 @@ static int gntdev_open(struct inode *inode, struct file *flip)
#ifdef CONFIG_XEN_GNTDEV_DMABUF
priv->dmabuf_priv = gntdev_dmabuf_init(flip);
if (IS_ERR(priv->dmabuf_priv)) {
- ret = PTR_ERR(priv->dmabuf_priv);
- kfree(priv);
- return ret;
- }
-#endif
+ int ret = PTR_ERR(priv->dmabuf_priv);
- if (ret) {
kfree(priv);
return ret;
}
+#endif
flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
@@ -573,7 +565,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
pr_debug("priv %p, add %d\n", priv, op.count);
- if (unlikely(op.count <= 0))
+ if (unlikely(gntdev_test_page_count(op.count)))
return -EINVAL;
err = -ENOMEM;
@@ -581,12 +573,6 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
if (!map)
return err;
- if (unlikely(gntdev_account_mapped_pages(op.count))) {
- pr_debug("can't map: over limit\n");
- gntdev_put_map(NULL, map);
- return err;
- }
-
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
gntdev_put_map(NULL, map);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 5e30602fdbad..59e85e408c23 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -74,7 +74,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
"xen-platform-pci", pdev);
}
-static int platform_pci_resume(struct pci_dev *pdev)
+static int platform_pci_resume(struct device *dev)
{
int err;
@@ -83,7 +83,7 @@ static int platform_pci_resume(struct pci_dev *pdev)
err = xen_set_callback_via(callback_via);
if (err) {
- dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ dev_err(dev, "platform_pci_resume failure!\n");
return err;
}
return 0;
@@ -168,13 +168,17 @@ static const struct pci_device_id platform_pci_tbl[] = {
{0,}
};
+static struct dev_pm_ops platform_pm_ops = {
+ .resume_noirq = platform_pci_resume,
+};
+
static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
-#ifdef CONFIG_PM
- .resume_early = platform_pci_resume,
-#endif
+ .driver = {
+ .pm = &platform_pm_ops,
+ },
};
builtin_pci_driver(platform_driver);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 5b471889d723..c21be6e9d38a 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -232,9 +232,16 @@ int xenbus_dev_probe(struct device *_dev)
return err;
}
+ if (!try_module_get(drv->driver.owner)) {
+ dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
+ drv->driver.name);
+ err = -ESRCH;
+ goto fail;
+ }
+
err = drv->probe(dev, id);
if (err)
- goto fail;
+ goto fail_put;
err = watch_otherend(dev);
if (err) {
@@ -244,6 +251,8 @@ int xenbus_dev_probe(struct device *_dev)
}
return 0;
+fail_put:
+ module_put(drv->driver.owner);
fail:
xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
xenbus_switch_state(dev, XenbusStateClosed);
@@ -263,6 +272,8 @@ int xenbus_dev_remove(struct device *_dev)
if (drv->remove)
drv->remove(dev);
+ module_put(drv->driver.owner);
+
free_otherend_details(dev);
xenbus_switch_state(dev, XenbusStateClosed);
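
The xenbus change above pins the backend module for the lifetime of
the binding: try_module_get() before ->probe(), module_put() on probe
failure and in ->remove(). A condensed sketch of the pairing
(foo_bus_* names are illustrative):

#include <linux/device.h>
#include <linux/module.h>

static int foo_bus_probe(struct device_driver *drv, int (*probe)(void))
{
	int err;

	if (!try_module_get(drv->owner))
		return -ESRCH;		/* module already unloading */
	err = probe();
	if (err)
		module_put(drv->owner);	/* drop the pin on failure */
	return err;
}

static void foo_bus_remove(struct device_driver *drv, void (*remove)(void))
{
	if (remove)
		remove();
	module_put(drv->owner);		/* balance the probe-time get */
}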